[yt-svn] commit/yt: 54 new changesets
commits-noreply at bitbucket.org
Wed Feb 3 09:15:18 PST 2016
54 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/62ce6606862e/
Changeset: 62ce6606862e
Branch: yt
User: brittonsmith
Date: 2015-12-16 12:21:05+00:00
Summary: Adding initial functions for halo objects (getting member particles from halo catalogs).
Affected #: 1 file
diff -r a065f0f3bd81c1906c2d5cc389fbfa07878ca4b3 -r 62ce6606862eff5f915f379e76b6d94c704d0bf7 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -15,6 +15,7 @@
#-----------------------------------------------------------------------------
from collections import defaultdict
+from functools import partial
from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import stat
@@ -123,6 +124,10 @@
super(GadgetFOFDataset, self).__init__(filename, dataset_type,
units_override=units_override)
+ def _setup_classes(self):
+ super(GadgetFOFDataset, self)._setup_classes()
+ self.halo = partial(GadgetFOFHaloDataset, self)
+
def _parse_parameter_file(self):
handle = h5py.File(self.parameter_filename, mode="r")
hvals = {}
@@ -222,6 +227,9 @@
time_unit = (1., "s")
self.time_unit = self.quan(time_unit[0], time_unit[1])
+ def __repr__(self):
+ return self.basename.split(".", 1)[0]
+
@classmethod
def _is_valid(self, *args, **kwargs):
need_groups = ['Group', 'Header', 'Subhalo']
@@ -236,3 +244,23 @@
valid = False
pass
return valid
+
+class GadgetFOFHaloDataset(Dataset):
+ _index_class = GadgetFOFParticleIndex
+ _file_class = GadgetFOFHDF5File
+ _field_info_class = GadgetFOFFieldInfo
+ _suffix = ".hdf5"
+
+ def __init__(self, ds, ptype, particle_identifier):
+ self.ds = ds
+ if ptype not in ds.particle_types_raw:
+ raise RuntimeError("Possible halo types are %s, supplied \"%s\"." %
+ (ds.particle_types_raw, ptype))
+ self.ptype = ptype
+ self.particle_identifier = particle_identifier
+
+ def __repr__(self):
+ return "%s_%s_%09d" % (self.ds, self.ptype, self.particle_identifier)
+
+ def _setup_classes(self):
+ pass
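
A minimal usage sketch of the hook added above; the catalog path is
illustrative, not taken from the diff:

    import yt

    ds = yt.load("fof_subhalo_tab_005.0.hdf5")   # any GadgetFOF catalog
    # ds.halo is a functools.partial binding ds as the first argument,
    # so this constructs GadgetFOFHaloDataset(ds, "Group", 0):
    halo_ds = ds.halo("Group", 0)
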
https://bitbucket.org/yt_analysis/yt/commits/17f761a6a262/
Changeset: 17f761a6a262
Branch: yt
User: brittonsmith
Date: 2015-12-16 15:14:27+00:00
Summary: Adding field info for halo datasets.
Affected #: 1 file
diff -r 62ce6606862eff5f915f379e76b6d94c704d0bf7 -r 17f761a6a26296791d0fc109bb63815a54125a8e yt/frontends/gadget_fof/fields.py
--- a/yt/frontends/gadget_fof/fields.py
+++ b/yt/frontends/gadget_fof/fields.py
@@ -56,3 +56,11 @@
("", "particle_number"),
("", "particle_ones"),
)
+
+class GadgetFOFHaloFieldInfo(FieldInfoContainer):
+ known_other_fields = (
+ )
+
+ known_particle_fields = (
+ ("ID", ("", ["Group", "particle_identifier"], None)),
+ )
https://bitbucket.org/yt_analysis/yt/commits/8ce310dc635c/
Changeset: 8ce310dc635c
Branch: yt
User: brittonsmith
Date: 2015-12-16 15:19:41+00:00
Summary: Right now we can create the halo dataset and the index.
Affected #: 2 files
diff -r 17f761a6a26296791d0fc109bb63815a54125a8e -r 8ce310dc635c5573bacaf59bcf69a087b398e721 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -23,7 +23,8 @@
import os
from yt.frontends.gadget_fof.fields import \
- GadgetFOFFieldInfo
+ GadgetFOFFieldInfo, \
+ GadgetFOFHaloFieldInfo
from yt.utilities.cosmology import \
Cosmology
@@ -72,6 +73,21 @@
for i, data_file in enumerate(self.data_files):
data_file.offset_files = self.data_files[istart[i]: iend[i] + 1]
+ def _create_halo_id_table(self):
+ """
+ Create a list of halo start ids so we know which file
+ contains particles for a given halo.
+ """
+ self._halo_index_start = \
+ dict([(ptype, np.array([dom.index_start[ptype]
+ for dom in self.data_files]))
+ for ptype in self.ds.particle_types_raw])
+ self._halo_index_end = \
+ dict([(ptype, np.array([dom.total_particles[ptype]
+ for dom in self.data_files]) +
+ self._halo_index_start[ptype])
+ for ptype in self.ds.particle_types_raw])
+
def _detect_output_fields(self):
# TODO: Add additional fields
dsl = []
@@ -89,11 +105,13 @@
# exist. As in, they are real, in the dataset.
ds.field_units.update(units)
ds.particle_types_raw = tuple(sorted(ds.particle_types))
-
+
def _setup_geometry(self):
super(GadgetFOFParticleIndex, self)._setup_geometry()
self._calculate_particle_index_starts()
self._calculate_file_offset_map()
+ self.data_files = sorted(self.data_files)
+ self._create_halo_id_table()
class GadgetFOFHDF5File(ParticleFile):
def __init__(self, ds, io, filename, file_id):
@@ -245,20 +263,83 @@
pass
return valid
+class GadgetFOFHaloParticleIndex(ParticleIndex):
+ def __init__(self, ds, dataset_type):
+ super(GadgetFOFHaloParticleIndex, self).__init__(ds, dataset_type)
+
+ def _initialize_particle_handler(self):
+ "Find the file that has the data for this halo."
+ ptype = self.ds.ptype
+ if self.ds.particle_identifier >= self.ds.ds.index._halo_index_end[ptype][-1]:
+ raise RuntimeError("Halo %d is out of bounds of catalog with %d halos." %
+ (self.ds.particle_identifier,
+ self.ds.ds.index._halo_index_end[ptype][-1]))
+ file_index = \
+ (self.ds.particle_identifier >=
+ self.ds.ds.index._halo_index_start[ptype]) & \
+ (self.ds.particle_identifier <
+ self.ds.ds.index._halo_index_end[ptype])
+ self.data_file = self.ds.ds.index.data_files[np.where(file_index)[0]]
+
+ def _calculate_particle_index_starts(self):
+ # Halo indices are not saved in the file, so we must count by hand.
+ # File 0 has halos 0 to N_0 - 1, file 1 has halos N_0 to N_0 + N_1 - 1, etc.
+ particle_count = defaultdict(int)
+ offset_count = 0
+ for data_file in self.data_files:
+ data_file.index_start = dict([(ptype, particle_count[ptype]) for
+ ptype in data_file.total_particles])
+ data_file.offset_start = offset_count
+ for ptype in data_file.total_particles:
+ particle_count[ptype] += data_file.total_particles[ptype]
+ offset_count += data_file.total_offset
+
+ def _detect_output_fields(self):
+ dsl, units = self.io._identify_fields(self.data_file)
+ self.field_list = dsl
+ ds = self.dataset
+ ds.particle_types = tuple(set(pt for pt, ds in dsl))
+ ds.field_units.update(units)
+
class GadgetFOFHaloDataset(Dataset):
- _index_class = GadgetFOFParticleIndex
+ _index_class = GadgetFOFHaloParticleIndex
_file_class = GadgetFOFHDF5File
- _field_info_class = GadgetFOFFieldInfo
+ _field_info_class = GadgetFOFHaloFieldInfo
_suffix = ".hdf5"
- def __init__(self, ds, ptype, particle_identifier):
+ def __init__(self, ds, ptype, particle_identifier,
+ dataset_type="gadget_fof_halo_hdf5"):
self.ds = ds
+ self.ds.index
if ptype not in ds.particle_types_raw:
raise RuntimeError("Possible halo types are %s, supplied \"%s\"." %
(ds.particle_types_raw, ptype))
+ if ptype == "Subhalo":
+ raise NotImplementedError()
self.ptype = ptype
+ self.particle_types_raw = (self.ptype,)
+ self.particle_types = self.particle_types_raw
self.particle_identifier = particle_identifier
+ super(GadgetFOFHaloDataset, self).__init__(ds.parameter_filename, dataset_type)
+
+ def print_key_parameters(self):
+ pass
+
+ def _set_derived_attrs(self):
+ pass
+
+ def _parse_parameter_file(self):
+ self.current_time = self.ds.current_time
+ self.unique_identifier = self.ds.unique_identifier
+ self.dimensionality = self.ds.dimensionality
+
+ def set_code_units(self):
+ for unit in ["length", "time", "mass",
+ "velocity", "magnetic", "temperature"]:
+ my_unit = "%s_unit" % unit
+ setattr(self, my_unit, getattr(self.ds, my_unit, None))
+
def __repr__(self):
return "%s_%s_%09d" % (self.ds, self.ptype, self.particle_identifier)
diff -r 17f761a6a26296791d0fc109bb63815a54125a8e -r 8ce310dc635c5573bacaf59bcf69a087b398e721 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -168,6 +168,87 @@
fields.extend(my_fields)
self.offset_fields = self.offset_fields.union(set(my_offset_fields))
return fields, {}
+class IOHandlerGadgetFOFHaloHDF5(BaseIOHandler):
+ _dataset_type = "gadget_fof_halo_hdf5"
+
+ def __init__(self, ds):
+ super(IOHandlerGadgetFOFHaloHDF5, self).__init__(ds)
+
+ def _read_fluid_selection(self, chunks, selector, fields, size):
+ raise NotImplementedError
+
+ def _read_particle_coords(self, chunks, ptf):
+ # This will read chunks and yield the results.
+ chunks = list(chunks)
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in sorted(data_files):
+ with h5py.File(data_file.filename, "r") as f:
+ for ptype, field_list in sorted(ptf.items()):
+ pcount = data_file.total_particles[ptype]
+ coords = f[ptype]["%sPos" % ptype].value.astype("float64")
+ coords = np.resize(coords, (pcount, 3))
+ x = coords[:, 0]
+ y = coords[:, 1]
+ z = coords[:, 2]
+ yield ptype, (x, y, z)
+
+ def _read_particle_fields(self, chunks, ptf, selector):
+ # Now we have all the sizes, and we can allocate
+ chunks = list(chunks)
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in sorted(data_files):
+ with h5py.File(data_file.filename, "r") as f:
+ for ptype, field_list in sorted(ptf.items()):
+ pcount = data_file.total_particles[ptype]
+ if pcount == 0: continue
+ coords = f[ptype]["%sPos" % ptype].value.astype("float64")
+ coords = np.resize(coords, (pcount, 3))
+ x = coords[:, 0]
+ y = coords[:, 1]
+ z = coords[:, 2]
+ mask = selector.select_points(x, y, z, 0.0)
+ del x, y, z
+ if mask is None: continue
+ for field in field_list:
+ if field in self.offset_fields:
+ field_data = \
+ self._read_offset_particle_field(field, data_file, f)
+ else:
+ if field == "particle_identifier":
+ field_data = \
+ np.arange(data_file.total_particles[ptype]) + \
+ data_file.index_start[ptype]
+ elif field in f[ptype]:
+ field_data = f[ptype][field].value.astype("float64")
+ else:
+ fname = field[:field.rfind("_")]
+ field_data = f[ptype][fname].value.astype("float64")
+ my_div = field_data.size / pcount
+ if my_div > 1:
+ field_data = np.resize(field_data, (pcount, my_div))
+ findex = int(field[field.rfind("_") + 1:])
+ field_data = field_data[:, findex]
+ data = field_data[mask]
+ yield (ptype, field), data
+
+ def _identify_fields(self, data_file):
+ fields = []
+ pcount = data_file.total_particles
+ if sum(pcount.values()) == 0: return fields, {}
+ with h5py.File(data_file.filename, "r") as f:
+ if "IDs" not in f:
+ return fields, {}
+ for ptype in self.ds.particle_types_raw:
+ if ptype == "Subhalo": continue
+ if data_file.total_particles[ptype] == 0: continue
+ fields.extend([(ptype, field) for field in f["IDs"]])
+ return fields, {}
def subfind_field_list(fh, ptype, pcount):
fields = []
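
The bookkeeping in _create_halo_id_table and _initialize_particle_handler
reduces to per-file start/end arrays plus a boolean mask. A minimal sketch
with made-up per-file halo counts:

    import numpy as np

    counts = np.array([3, 5, 2])                         # halos per file
    start = np.concatenate(([0], counts[:-1].cumsum()))  # [0, 3, 8]
    end = start + counts                                 # [3, 8, 10]

    halo_id = 7
    file_index = (halo_id >= start) & (halo_id < end)
    np.where(file_index)[0]                              # -> [1]: file 1
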
https://bitbucket.org/yt_analysis/yt/commits/6191c85d6b80/
Changeset: 6191c85d6b80
Branch: yt
User: brittonsmith
Date: 2015-12-18 13:24:45+00:00
Summary: Halo member particle selection now works. Now time to refactor.
Affected #: 2 files
diff -r 8ce310dc635c5573bacaf59bcf69a087b398e721 -r 6191c85d6b80df186963d341fbf2d27a654a6854 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -22,9 +22,15 @@
import glob
import os
+from yt.data_objects.data_containers import \
+ YTSelectionContainer
from yt.frontends.gadget_fof.fields import \
GadgetFOFFieldInfo, \
GadgetFOFHaloFieldInfo
+from yt.geometry.geometry_handler import \
+ Index
+from yt.geometry.selection_routines import \
+ AlwaysSelector
from yt.utilities.cosmology import \
Cosmology
@@ -281,6 +287,16 @@
self.ds.ds.index._halo_index_end[ptype])
self.data_file = self.ds.ds.index.data_files[np.where(file_index)[0]]
+ # index within halo arrays that corresponds to this halo
+ self.scalar_index = self.ds.particle_identifier - \
+ self.ds.ds.index._halo_index_start[ptype][file_index][0]
+
+ # find start and end index for halo member particles
+ with h5py.File(self.data_file.filename, "r") as f:
+ halo_size = f[ptype]["GroupLen"].value
+ self.psize = halo_size[self.scalar_index]
+ self.field_index = halo_size[:self.scalar_index].sum()
+
def _calculate_particle_index_starts(self):
# Halo indices are not saved in the file, so we must count by hand.
# File 0 has halos 0 to N_0 - 1, file 1 has halos N_0 to N_0 + N_1 - 1, etc.
@@ -301,6 +317,25 @@
ds.particle_types = tuple(set(pt for pt, ds in dsl))
ds.field_units.update(units)
+ def _identify_base_chunk(self, dobj):
+ pass
+
+ def _read_particle_fields(self, fields, dobj, chunk = None):
+ if len(fields) == 0: return {}, []
+ fields_to_read, fields_to_generate = self._split_fields(fields)
+ if len(fields_to_read) == 0:
+ return {}, fields_to_generate
+ fields_to_return = self.io._read_particle_selection(
+ fields_to_read)
+ return fields_to_return, fields_to_generate
+
+class GagdetFOFHaloContainer(YTSelectionContainer):
+ _spatial = False
+ _selector = AlwaysSelector
+
+ def __init__(self, ds):
+ super(GagdetFOFHaloContainer, self).__init__(ds, {})
+
class GadgetFOFHaloDataset(Dataset):
_index_class = GadgetFOFHaloParticleIndex
_file_class = GadgetFOFHDF5File
@@ -323,6 +358,10 @@
super(GadgetFOFHaloDataset, self).__init__(ds.parameter_filename, dataset_type)
+ @property
+ def data(self):
+ return GagdetFOFHaloContainer(self)
+
def print_key_parameters(self):
pass
@@ -344,4 +383,4 @@
return "%s_%s_%09d" % (self.ds, self.ptype, self.particle_identifier)
def _setup_classes(self):
- pass
+ self.objects = []
diff -r 8ce310dc635c5573bacaf59bcf69a087b398e721 -r 6191c85d6b80df186963d341fbf2d27a654a6854 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -14,6 +14,8 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
+from collections import defaultdict
+
from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
@@ -168,6 +170,7 @@
fields.extend(my_fields)
self.offset_fields = self.offset_fields.union(set(my_offset_fields))
return fields, {}
+
class IOHandlerGadgetFOFHaloHDF5(BaseIOHandler):
_dataset_type = "gadget_fof_halo_hdf5"
@@ -178,64 +181,80 @@
raise NotImplementedError
def _read_particle_coords(self, chunks, ptf):
- # This will read chunks and yield the results.
- chunks = list(chunks)
- data_files = set([])
- for chunk in chunks:
- for obj in chunk.objs:
- data_files.update(obj.data_files)
- for data_file in sorted(data_files):
- with h5py.File(data_file.filename, "r") as f:
- for ptype, field_list in sorted(ptf.items()):
- pcount = data_file.total_particles[ptype]
- coords = f[ptype]["%sPos" % ptype].value.astype("float64")
- coords = np.resize(coords, (pcount, 3))
- x = coords[:, 0]
- y = coords[:, 1]
- z = coords[:, 2]
- yield ptype, (x, y, z)
+ pass
- def _read_particle_fields(self, chunks, ptf, selector):
- # Now we have all the sizes, and we can allocate
- chunks = list(chunks)
- data_files = set([])
- for chunk in chunks:
- for obj in chunk.objs:
- data_files.update(obj.data_files)
- for data_file in sorted(data_files):
- with h5py.File(data_file.filename, "r") as f:
- for ptype, field_list in sorted(ptf.items()):
- pcount = data_file.total_particles[ptype]
- if pcount == 0: continue
- coords = f[ptype]["%sPos" % ptype].value.astype("float64")
- coords = np.resize(coords, (pcount, 3))
- x = coords[:, 0]
- y = coords[:, 1]
- z = coords[:, 2]
- mask = selector.select_points(x, y, z, 0.0)
- del x, y, z
- if mask is None: continue
- for field in field_list:
- if field in self.offset_fields:
- field_data = \
- self._read_offset_particle_field(field, data_file, f)
- else:
- if field == "particle_identifier":
- field_data = \
- np.arange(data_file.total_particles[ptype]) + \
- data_file.index_start[ptype]
- elif field in f[ptype]:
- field_data = f[ptype][field].value.astype("float64")
- else:
- fname = field[:field.rfind("_")]
- field_data = f[ptype][fname].value.astype("float64")
- my_div = field_data.size / pcount
- if my_div > 1:
- field_data = np.resize(field_data, (pcount, my_div))
- findex = int(field[field.rfind("_") + 1:])
- field_data = field_data[:, findex]
- data = field_data[mask]
- yield (ptype, field), data
+ def _read_particle_selection(self, fields):
+ rv = {}
+ ind = {}
+ # We first need a set of masks for each particle type
+ ptf = defaultdict(list) # ON-DISK TO READ
+ fsize = defaultdict(lambda: 0) # COUNT RV
+ field_maps = defaultdict(list) # ptypes -> fields
+ unions = self.ds.particle_unions
+ # What we need is a mapping from particle types to return types
+ for field in fields:
+ ftype, fname = field
+ fsize[field] = 0
+ # We should add a check for p.fparticle_unions or something here
+ if ftype in unions:
+ for pt in unions[ftype]:
+ ptf[pt].append(fname)
+ field_maps[pt, fname].append(field)
+ else:
+ ptf[ftype].append(fname)
+ field_maps[field].append(field)
+
+ # Now we allocate
+ psize = {self.ds.ptype: self.ds.index.psize}
+ for field in fields:
+ if field[0] in unions:
+ for pt in unions[field[0]]:
+ fsize[field] += psize.get(pt, 0)
+ else:
+ fsize[field] += psize.get(field[0], 0)
+ for field in fields:
+ if field[1] in self._vector_fields:
+ shape = (fsize[field], self._vector_fields[field[1]])
+ elif field[1] in self._array_fields:
+ shape = (fsize[field],)+self._array_fields[field[1]]
+ else:
+ shape = (fsize[field], )
+ rv[field] = np.empty(shape, dtype="float64")
+ ind[field] = 0
+ # Now we read.
+ for field_r, vals in self._read_particle_fields(ptf):
+ # Note that we now need to check the mappings
+ for field_f in field_maps[field_r]:
+ my_ind = ind[field_f]
+ rv[field_f][my_ind:my_ind + vals.shape[0],...] = vals
+ ind[field_f] += vals.shape[0]
+ # Now we need to truncate all our fields, since we allow for
+ # over-estimating.
+ for field_f in ind:
+ rv[field_f] = rv[field_f][:ind[field_f]]
+ return rv
+
+ def _read_particle_fields(self, ptf):
+ data_file = self.ds.index.data_file
+ start_index = self.ds.index.field_index
+ end_index = start_index + self.ds.index.psize
+ with h5py.File(data_file.filename, "r") as f:
+ for ptype, field_list in sorted(ptf.items()):
+ pcount = data_file.total_particles[ptype]
+ if pcount == 0: continue
+ for field in field_list:
+ if field in f["IDs"]:
+ field_data = f["IDs"][field][start_index:end_index].astype("float64")
+ else:
+ fname = field[:field.rfind("_")]
+ field_data = f["IDs"][fname][start_index:end_index].astype("float64")
+ my_div = field_data.size / pcount
+ if my_div > 1:
+ field_data = np.resize(field_data, (pcount, my_div))
+ findex = int(field[field.rfind("_") + 1:])
+ field_data = field_data[:, findex]
+ data = field_data
+ yield (ptype, field), data
def _identify_fields(self, data_file):
fields = []
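
_read_particle_selection follows yt's usual allocate-then-truncate pattern:
size the output from an upper bound, fill it as reads arrive, then trim to
what was actually read. The pattern in isolation, with stand-in data:

    import numpy as np

    rv = np.empty(10, dtype="float64")       # possibly an over-estimate
    n = 0
    for vals in (np.array([1.0, 2.0]), np.array([3.0])):
        rv[n:n + vals.size] = vals
        n += vals.size
    rv = rv[:n]                              # -> array([1., 2., 3.])
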
https://bitbucket.org/yt_analysis/yt/commits/b8c4658d34ba/
Changeset: b8c4658d34ba
Branch: yt
User: brittonsmith
Date: 2015-12-20 20:50:37+00:00
Summary: Refactoring to hang halo container directly off the dataset.
Affected #: 2 files
diff -r 6191c85d6b80df186963d341fbf2d27a654a6854 -r b8c4658d34ba44201e2657941e61124e57a9d41e yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -21,6 +21,7 @@
import stat
import glob
import os
+import weakref
from yt.data_objects.data_containers import \
YTSelectionContainer
@@ -79,23 +80,7 @@
for i, data_file in enumerate(self.data_files):
data_file.offset_files = self.data_files[istart[i]: iend[i] + 1]
- def _create_halo_id_table(self):
- """
- Create a list of halo start ids so we know which file
- contains particles for a given halo.
- """
- self._halo_index_start = \
- dict([(ptype, np.array([dom.index_start[ptype]
- for dom in self.data_files]))
- for ptype in self.ds.particle_types_raw])
- self._halo_index_end = \
- dict([(ptype, np.array([dom.total_particles[ptype]
- for dom in self.data_files]) +
- self._halo_index_start[ptype])
- for ptype in self.ds.particle_types_raw])
-
def _detect_output_fields(self):
- # TODO: Add additional fields
dsl = []
units = {}
for dom in self.data_files:
@@ -116,8 +101,6 @@
super(GadgetFOFParticleIndex, self)._setup_geometry()
self._calculate_particle_index_starts()
self._calculate_file_offset_map()
- self.data_files = sorted(self.data_files)
- self._create_halo_id_table()
class GadgetFOFHDF5File(ParticleFile):
def __init__(self, ds, io, filename, file_id):
@@ -146,11 +129,18 @@
raise RuntimeError("units_override is not supported for GadgetFOFDataset. "+
"Use unit_base instead.")
super(GadgetFOFDataset, self).__init__(filename, dataset_type,
- units_override=units_override)
+ units_override=units_override)
+
+ _instantiated_halo_ds = None
+ @property
+ def _halos_ds(self):
+ if self._instantiated_halo_ds is None:
+ self._instantiated_halo_ds = GadgetFOFHaloDataset(self)
+ return self._instantiated_halo_ds
def _setup_classes(self):
super(GadgetFOFDataset, self)._setup_classes()
- self.halo = partial(GadgetFOFHaloDataset, self)
+ self.halo = partial(GagdetFOFHaloContainer, self._halos_ds)
def _parse_parameter_file(self):
handle = h5py.File(self.parameter_filename, mode="r")
@@ -269,33 +259,41 @@
pass
return valid
-class GadgetFOFHaloParticleIndex(ParticleIndex):
+class GadgetFOFHaloParticleIndex(GadgetFOFParticleIndex):
def __init__(self, ds, dataset_type):
+ self.real_ds = weakref.proxy(ds.real_ds)
super(GadgetFOFHaloParticleIndex, self).__init__(ds, dataset_type)
- def _initialize_particle_handler(self):
- "Find the file that has the data for this halo."
- ptype = self.ds.ptype
- if self.ds.particle_identifier >= self.ds.ds.index._halo_index_end[ptype][-1]:
- raise RuntimeError("Halo %d is out of bounds of catalog with %d halos." %
- (self.ds.particle_identifier,
- self.ds.ds.index._halo_index_end[ptype][-1]))
- file_index = \
- (self.ds.particle_identifier >=
- self.ds.ds.index._halo_index_start[ptype]) & \
- (self.ds.particle_identifier <
- self.ds.ds.index._halo_index_end[ptype])
- self.data_file = self.ds.ds.index.data_files[np.where(file_index)[0]]
+ def _setup_geometry(self):
+ self._setup_data_io()
- # index within halo arrays that corresponds to this halo
- self.scalar_index = self.ds.particle_identifier - \
- self.ds.ds.index._halo_index_start[ptype][file_index][0]
+ if self.real_ds._instantiated_index is None:
+ template = self.real_ds.filename_template
+ ndoms = self.real_ds.file_count
+ cls = self.real_ds._file_class
+ self.data_files = [cls(self.dataset, self.io, template % {'num':i}, i)
+ for i in range(ndoms)]
+ else:
+ self.data_files = self.read_ds.index.data_files
- # find start and end index for halo member particles
- with h5py.File(self.data_file.filename, "r") as f:
- halo_size = f[ptype]["GroupLen"].value
- self.psize = halo_size[self.scalar_index]
- self.field_index = halo_size[:self.scalar_index].sum()
+ self._calculate_particle_index_starts()
+ self._create_halo_id_table()
+
+ def _create_halo_id_table(self):
+ """
+ Create a list of halo start ids so we know which file
+ contains particles for a given halo.
+ """
+
+ self._halo_index_start = \
+ dict([(ptype, np.array([dom.index_start[ptype]
+ for dom in self.data_files]))
+ for ptype in self.ds.particle_types_raw])
+ self._halo_index_end = \
+ dict([(ptype, np.array([dom.total_particles[ptype]
+ for dom in self.data_files]) +
+ self._halo_index_start[ptype])
+ for ptype in self.ds.particle_types_raw])
def _calculate_particle_index_starts(self):
# Halo indices are not saved in the file, so we must count by hand.
@@ -311,11 +309,17 @@
offset_count += data_file.total_offset
def _detect_output_fields(self):
- dsl, units = self.io._identify_fields(self.data_file)
+ dsl = []
+ units = {}
+ for dom in self.data_files:
+ if sum(dom.total_particles.values()) > 0:
+ dsl, units = self.io._identify_fields(dom)
+ break
self.field_list = dsl
ds = self.dataset
ds.particle_types = tuple(set(pt for pt, ds in dsl))
ds.field_units.update(units)
+ ds.particle_types_raw = ds.particle_types
def _identify_base_chunk(self, dobj):
pass
@@ -329,38 +333,19 @@
fields_to_read)
return fields_to_return, fields_to_generate
-class GagdetFOFHaloContainer(YTSelectionContainer):
- _spatial = False
- _selector = AlwaysSelector
-
- def __init__(self, ds):
- super(GagdetFOFHaloContainer, self).__init__(ds, {})
-
class GadgetFOFHaloDataset(Dataset):
_index_class = GadgetFOFHaloParticleIndex
_file_class = GadgetFOFHDF5File
_field_info_class = GadgetFOFHaloFieldInfo
_suffix = ".hdf5"
- def __init__(self, ds, ptype, particle_identifier,
- dataset_type="gadget_fof_halo_hdf5"):
- self.ds = ds
- self.ds.index
- if ptype not in ds.particle_types_raw:
- raise RuntimeError("Possible halo types are %s, supplied \"%s\"." %
- (ds.particle_types_raw, ptype))
- if ptype == "Subhalo":
- raise NotImplementedError()
- self.ptype = ptype
- self.particle_types_raw = (self.ptype,)
+ def __init__(self, ds, dataset_type="gadget_fof_halo_hdf5"):
+ self.real_ds = ds
+ self.particle_types_raw = self.real_ds.particle_types_raw
self.particle_types = self.particle_types_raw
- self.particle_identifier = particle_identifier
- super(GadgetFOFHaloDataset, self).__init__(ds.parameter_filename, dataset_type)
-
- @property
- def data(self):
- return GagdetFOFHaloContainer(self)
+ super(GadgetFOFHaloDataset, self).__init__(
+ self.real_ds.parameter_filename, dataset_type)
def print_key_parameters(self):
pass
@@ -369,18 +354,55 @@
pass
def _parse_parameter_file(self):
- self.current_time = self.ds.current_time
- self.unique_identifier = self.ds.unique_identifier
- self.dimensionality = self.ds.dimensionality
+ for attr in ["dimensionality", "current_time",
+ "unique_identifier"]:
+ setattr(self, attr, getattr(self.real_ds, attr))
def set_code_units(self):
for unit in ["length", "time", "mass",
"velocity", "magnetic", "temperature"]:
my_unit = "%s_unit" % unit
- setattr(self, my_unit, getattr(self.ds, my_unit, None))
+ setattr(self, my_unit, getattr(self.real_ds, my_unit, None))
def __repr__(self):
- return "%s_%s_%09d" % (self.ds, self.ptype, self.particle_identifier)
+ return "%s" % self.real_ds
def _setup_classes(self):
self.objects = []
+
+class GagdetFOFHaloContainer(YTSelectionContainer):
+ _spatial = False
+ _selector = AlwaysSelector
+
+ def __init__(self, ds, ptype, particle_identifier):
+ if ptype not in ds.particle_types_raw:
+ raise RuntimeError("Possible halo types are %s, supplied \"%s\"." %
+ (ds.particle_types_raw, ptype))
+ if ptype == "Subhalo":
+ raise NotImplementedError()
+
+ self.ptype = ptype
+ self.particle_identifier = particle_identifier
+ super(GagdetFOFHaloContainer, self).__init__(ds, {})
+
+ # Find the file that has the data for this halo.
+ if self.particle_identifier >= self.ds.index._halo_index_end[ptype][-1]:
+ raise RuntimeError("Halo %d is out of bounds of catalog with %d halos." %
+ (self.particle_identifier,
+ self.ds.index._halo_index_end[ptype][-1]))
+ file_index = \
+ (self.particle_identifier >=
+ self.ds.index._halo_index_start[ptype]) & \
+ (self.particle_identifier <
+ self.ds.index._halo_index_end[ptype])
+ self.data_file = self.ds.index.data_files[np.where(file_index)[0]]
+
+ # index within halo arrays that corresponds to this halo
+ self.scalar_index = self.particle_identifier - \
+ self.ds.index._halo_index_start[ptype][file_index][0]
+
+ # find start and end index for halo member particles
+ with h5py.File(self.data_file.filename, "r") as f:
+ halo_size = f[ptype]["GroupLen"].value
+ self.psize = halo_size[self.scalar_index]
+ self.field_index = halo_size[:self.scalar_index].sum()
diff -r 6191c85d6b80df186963d341fbf2d27a654a6854 -r b8c4658d34ba44201e2657941e61124e57a9d41e yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -183,6 +183,13 @@
def _read_particle_coords(self, chunks, ptf):
pass
+ def _count_particles(self, data_file):
+ with h5py.File(data_file.filename, "r") as f:
+ pcount = {"Group": f["Header"].attrs["Ngroups_ThisFile"],
+ "Subhalo": f["Header"].attrs["Nsubgroups_ThisFile"]}
+ data_file.total_offset = 0 # need to figure out how subfind works here
+ return pcount
+
def _read_particle_selection(self, fields):
rv = {}
ind = {}
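
This refactor caches a single companion halo dataset on the real dataset and
turns ds.halo into a container factory. The caching idiom in isolation
(stand-in types, not the real classes):

    class Catalog:
        _instantiated_halo_ds = None
        @property
        def _halos_ds(self):
            # build the companion dataset only on first access
            if self._instantiated_halo_ds is None:
                self._instantiated_halo_ds = object()
            return self._instantiated_halo_ds

    cat = Catalog()
    assert cat._halos_ds is cat._halos_ds    # constructed once, then reused
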
https://bitbucket.org/yt_analysis/yt/commits/b038e244be9e/
Changeset: b038e244be9e
Branch: yt
User: brittonsmith
Date: 2015-12-20 21:24:32+00:00
Summary: Implementing field querying.
Affected #: 2 files
diff -r b8c4658d34ba44201e2657941e61124e57a9d41e -r b038e244be9e1cce78961625a5c7706e83efa314 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -30,8 +30,6 @@
GadgetFOFHaloFieldInfo
from yt.geometry.geometry_handler import \
Index
-from yt.geometry.selection_routines import \
- AlwaysSelector
from yt.utilities.cosmology import \
Cosmology
@@ -330,14 +328,13 @@
if len(fields_to_read) == 0:
return {}, fields_to_generate
fields_to_return = self.io._read_particle_selection(
- fields_to_read)
+ dobj, fields_to_read)
return fields_to_return, fields_to_generate
class GadgetFOFHaloDataset(Dataset):
_index_class = GadgetFOFHaloParticleIndex
_file_class = GadgetFOFHDF5File
_field_info_class = GadgetFOFHaloFieldInfo
- _suffix = ".hdf5"
def __init__(self, ds, dataset_type="gadget_fof_halo_hdf5"):
self.real_ds = ds
@@ -372,7 +369,6 @@
class GagdetFOFHaloContainer(YTSelectionContainer):
_spatial = False
- _selector = AlwaysSelector
def __init__(self, ds, ptype, particle_identifier):
if ptype not in ds.particle_types_raw:
@@ -406,3 +402,7 @@
halo_size = f[ptype]["GroupLen"].value
self.psize = halo_size[self.scalar_index]
self.field_index = halo_size[:self.scalar_index].sum()
+
+ def __repr__(self):
+ return "%s_%s_%09d" % \
+ (self.ds, self.ptype, self.particle_identifier)
diff -r b8c4658d34ba44201e2657941e61124e57a9d41e -r b038e244be9e1cce78961625a5c7706e83efa314 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -190,7 +190,7 @@
data_file.total_offset = 0 # need to figure out how subfind works here
return pcount
- def _read_particle_selection(self, fields):
+ def _read_particle_selection(self, dobj, fields):
rv = {}
ind = {}
# We first need a set of masks for each particle type
@@ -212,7 +212,7 @@
field_maps[field].append(field)
# Now we allocate
- psize = {self.ds.ptype: self.ds.index.psize}
+ psize = {dobj.ptype: dobj.psize}
for field in fields:
if field[0] in unions:
for pt in unions[field[0]]:
@@ -229,7 +229,7 @@
rv[field] = np.empty(shape, dtype="float64")
ind[field] = 0
# Now we read.
- for field_r, vals in self._read_particle_fields(ptf):
+ for field_r, vals in self._read_particle_fields(dobj, ptf):
# Note that we now need to check the mappings
for field_f in field_maps[field_r]:
my_ind = ind[field_f]
@@ -241,20 +241,22 @@
rv[field_f] = rv[field_f][:ind[field_f]]
return rv
- def _read_particle_fields(self, ptf):
- data_file = self.ds.index.data_file
- start_index = self.ds.index.field_index
- end_index = start_index + self.ds.index.psize
+ def _read_particle_fields(self, dobj, ptf):
+ data_file = dobj.data_file
+ start_index = dobj.field_index
+ end_index = start_index + dobj.psize
with h5py.File(data_file.filename, "r") as f:
for ptype, field_list in sorted(ptf.items()):
pcount = data_file.total_particles[ptype]
if pcount == 0: continue
for field in field_list:
if field in f["IDs"]:
- field_data = f["IDs"][field][start_index:end_index].astype("float64")
+ field_data = \
+ f["IDs"][field][start_index:end_index].astype("float64")
else:
fname = field[:field.rfind("_")]
- field_data = f["IDs"][fname][start_index:end_index].astype("float64")
+ field_data = \
+ f["IDs"][fname][start_index:end_index].astype("float64")
my_div = field_data.size / pcount
if my_div > 1:
field_data = np.resize(field_data, (pcount, my_div))
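
With field querying wired through the container, member fields can in
principle be read like any other yt data object; the path and field name
here are assumptions for illustration:

    import yt

    ds = yt.load("fof_subhalo_tab_005.0.hdf5")
    halo = ds.halo("Group", 4)        # a GagdetFOFHaloContainer
    member_ids = halo["Group", "ID"]  # served by IOHandlerGadgetFOFHaloHDF5
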
https://bitbucket.org/yt_analysis/yt/commits/d358968bcfe0/
Changeset: d358968bcfe0
Branch: yt
User: brittonsmith
Date: 2015-12-21 18:42:02+00:00
Summary: Adding a count of particle ids in each halo.
Affected #: 1 file
diff -r b038e244be9e1cce78961625a5c7706e83efa314 -r d358968bcfe0a7ead5ec3bb795805d3ee55ec638 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -247,7 +247,7 @@
end_index = start_index + dobj.psize
with h5py.File(data_file.filename, "r") as f:
for ptype, field_list in sorted(ptf.items()):
- pcount = data_file.total_particles[ptype]
+ pcount = dobj.psize
if pcount == 0: continue
for field in field_list:
if field in f["IDs"]:
@@ -270,6 +270,7 @@
pcount = data_file.total_particles
if sum(pcount.values()) == 0: return fields, {}
with h5py.File(data_file.filename, "r") as f:
+ data_file.total_ids = {"Group": f["Header"].attrs["Nids_ThisFile"]}
if "IDs" not in f:
return fields, {}
for ptype in self.ds.particle_types_raw:
https://bitbucket.org/yt_analysis/yt/commits/ea0f46943f0f/
Changeset: ea0f46943f0f
Branch: yt
User: brittonsmith
Date: 2015-12-21 19:22:57+00:00
Summary: Removing duplicate function def.
Affected #: 1 file
diff -r d358968bcfe0a7ead5ec3bb795805d3ee55ec638 -r ea0f46943f0ffc3cfc0b8b14d4adfce441d56dfc yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -293,19 +293,6 @@
self._halo_index_start[ptype])
for ptype in self.ds.particle_types_raw])
- def _calculate_particle_index_starts(self):
- # Halo indices are not saved in the file, so we must count by hand.
- # File 0 has halos 0 to N_0 - 1, file 1 has halos N_0 to N_0 + N_1 - 1, etc.
- particle_count = defaultdict(int)
- offset_count = 0
- for data_file in self.data_files:
- data_file.index_start = dict([(ptype, particle_count[ptype]) for
- ptype in data_file.total_particles])
- data_file.offset_start = offset_count
- for ptype in data_file.total_particles:
- particle_count[ptype] += data_file.total_particles[ptype]
- offset_count += data_file.total_offset
-
def _detect_output_fields(self):
dsl = []
units = {}
https://bitbucket.org/yt_analysis/yt/commits/d901a11d2b87/
Changeset: d901a11d2b87
Branch: yt
User: brittonsmith
Date: 2015-12-22 20:59:08+00:00
Summary: Refactoring to reduce the number of times we open files.
Affected #: 2 files
diff -r ea0f46943f0ffc3cfc0b8b14d4adfce441d56dfc -r d901a11d2b879fef9a4e3bc4b0db55742bc85b6d yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -64,8 +64,8 @@
offset_count += data_file.total_offset
def _calculate_file_offset_map(self):
- # After the FOF is performed, a load-balancing step redistributes halos
- # and then writes more fields. Here, for each file, we create a list of
+ # After the FOF is performed, a load-balancing step redistributes halos
+ # and then writes more fields. Here, for each file, we create a list of
# files which contain the rest of the redistributed particles.
ifof = np.array([data_file.total_particles["Group"]
for data_file in self.data_files])
@@ -102,11 +102,16 @@
class GadgetFOFHDF5File(ParticleFile):
def __init__(self, ds, io, filename, file_id):
+ with h5py.File(filename, "r") as f:
+ self.header = \
+ dict((field, val) for field, val in f["Header"].attrs.items())
+ self.total_ids = {"Group": self.header["Nids_ThisFile"]}
+ self.total_particles = \
+ {"Group": self.header["Ngroups_ThisFile"],
+ "Subhalo": self.header["Nsubgroups_ThisFile"]}
+ self.total_offset = 0 # need to figure out how subfind works here
super(GadgetFOFHDF5File, self).__init__(ds, io, filename, file_id)
- with h5py.File(filename, "r") as f:
- self.header = dict((field, f.attrs[field]) \
- for field in f.attrs.keys())
-
+
class GadgetFOFDataset(Dataset):
_index_class = GadgetFOFParticleIndex
_file_class = GadgetFOFHDF5File
diff -r ea0f46943f0ffc3cfc0b8b14d4adfce441d56dfc -r d901a11d2b879fef9a4e3bc4b0db55742bc85b6d yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -151,11 +151,7 @@
return morton
def _count_particles(self, data_file):
- with h5py.File(data_file.filename, "r") as f:
- pcount = {"Group": f["Header"].attrs["Ngroups_ThisFile"],
- "Subhalo": f["Header"].attrs["Nsubgroups_ThisFile"]}
- data_file.total_offset = 0 # need to figure out how subfind works here
- return pcount
+ return data_file.total_particles
def _identify_fields(self, data_file):
fields = []
@@ -171,7 +167,7 @@
self.offset_fields = self.offset_fields.union(set(my_offset_fields))
return fields, {}
-class IOHandlerGadgetFOFHaloHDF5(BaseIOHandler):
+class IOHandlerGadgetFOFHaloHDF5(IOHandlerGadgetFOFHDF5):
_dataset_type = "gadget_fof_halo_hdf5"
def __init__(self, ds):
@@ -183,13 +179,6 @@
def _read_particle_coords(self, chunks, ptf):
pass
- def _count_particles(self, data_file):
- with h5py.File(data_file.filename, "r") as f:
- pcount = {"Group": f["Header"].attrs["Ngroups_ThisFile"],
- "Subhalo": f["Header"].attrs["Nsubgroups_ThisFile"]}
- data_file.total_offset = 0 # need to figure out how subfind works here
- return pcount
-
def _read_particle_selection(self, dobj, fields):
rv = {}
ind = {}
@@ -270,7 +259,6 @@
pcount = data_file.total_particles
if sum(pcount.values()) == 0: return fields, {}
with h5py.File(data_file.filename, "r") as f:
- data_file.total_ids = {"Group": f["Header"].attrs["Nids_ThisFile"]}
if "IDs" not in f:
return fields, {}
for ptype in self.ds.particle_types_raw:
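
Caching the header at file-object construction is what lets _count_particles
answer without reopening the file. A self-contained sketch of the pattern:

    import h5py

    class CatalogFile:
        def __init__(self, filename):
            with h5py.File(filename, "r") as f:   # one open per file
                self.header = dict(f["Header"].attrs.items())
            self.total_particles = \
                {"Group": self.header["Ngroups_ThisFile"],
                 "Subhalo": self.header["Nsubgroups_ThisFile"]}

        def count_particles(self):
            return self.total_particles           # no filesystem access
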
https://bitbucket.org/yt_analysis/yt/commits/6e3fda884e84/
Changeset: 6e3fda884e84
Branch: yt
User: brittonsmith
Date: 2015-12-22 21:21:18+00:00
Summary: A little more refactoring to minimize hitting the filesystem.
Affected #: 2 files
diff -r d901a11d2b879fef9a4e3bc4b0db55742bc85b6d -r 6e3fda884e84134592bebdc350bdca9dacea116f yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -19,7 +19,6 @@
from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import stat
-import glob
import os
import weakref
@@ -104,7 +103,8 @@
def __init__(self, ds, io, filename, file_id):
with h5py.File(filename, "r") as f:
self.header = \
- dict((field, val) for field, val in f["Header"].attrs.items())
+ dict((str(field), val)
+ for field, val in f["Header"].attrs.items())
self.total_ids = {"Group": self.header["Nids_ThisFile"]}
self.total_particles = \
{"Group": self.header["Ngroups_ThisFile"],
@@ -116,7 +116,6 @@
_index_class = GadgetFOFParticleIndex
_file_class = GadgetFOFHDF5File
_field_info_class = GadgetFOFFieldInfo
- _suffix = ".hdf5"
def __init__(self, filename, dataset_type="gadget_fof_hdf5",
n_ref=16, over_refine_factor=1,
@@ -146,10 +145,10 @@
self.halo = partial(GagdetFOFHaloContainer, self._halos_ds)
def _parse_parameter_file(self):
- handle = h5py.File(self.parameter_filename, mode="r")
- hvals = {}
- hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
- hvals["NumFiles"] = hvals["NumFiles"]
+ with h5py.File(self.parameter_filename,"r") as f:
+ self.parameters = \
+ dict((str(field), val)
+ for field, val in f["Header"].attrs.items())
self.dimensionality = 3
self.refine_by = 2
@@ -158,35 +157,29 @@
# Set standard values
self.domain_left_edge = np.zeros(3, "float64")
- self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
+ self.domain_right_edge = np.ones(3, "float64") * \
+ self.parameters["BoxSize"]
nz = 1 << self.over_refine_factor
self.domain_dimensions = np.ones(3, "int32") * nz
self.cosmological_simulation = 1
self.periodicity = (True, True, True)
- self.current_redshift = hvals["Redshift"]
- self.omega_lambda = hvals["OmegaLambda"]
- self.omega_matter = hvals["Omega0"]
- self.hubble_constant = hvals["HubbleParam"]
-
+ self.current_redshift = self.parameters["Redshift"]
+ self.omega_lambda = self.parameters["OmegaLambda"]
+ self.omega_matter = self.parameters["Omega0"]
+ self.hubble_constant = self.parameters["HubbleParam"]
cosmology = Cosmology(hubble_constant=self.hubble_constant,
omega_matter=self.omega_matter,
omega_lambda=self.omega_lambda)
self.current_time = cosmology.t_from_z(self.current_redshift)
- self.parameters = hvals
prefix = os.path.abspath(
os.path.join(os.path.dirname(self.parameter_filename),
os.path.basename(self.parameter_filename).split(".", 1)[0]))
-
suffix = self.parameter_filename.rsplit(".", 1)[-1]
self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix)
- self.file_count = len(glob.glob(prefix + "*" + self._suffix))
- if self.file_count == 0:
- raise YTException(message="No data files found.", ds=self)
+ self.file_count = self.parameters["NumFiles"]
self.particle_types = ("Group", "Subhalo")
self.particle_types_raw = ("Group", "Subhalo")
-
- handle.close()
def _set_code_unit_attributes(self):
# Set a sane default for cosmological simulations.
diff -r d901a11d2b879fef9a4e3bc4b0db55742bc85b6d -r 6e3fda884e84134592bebdc350bdca9dacea116f yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -57,7 +57,8 @@
def _read_offset_particle_field(self, field, data_file, fh):
field_data = np.empty(data_file.total_particles["Group"], dtype="float64")
- fofindex = np.arange(data_file.total_particles["Group"]) + data_file.index_start["Group"]
+ fofindex = np.arange(data_file.total_particles["Group"]) + \
+ data_file.index_start["Group"]
for offset_file in data_file.offset_files:
if fh.filename == offset_file.filename:
ofh = fh
@@ -170,12 +171,6 @@
class IOHandlerGadgetFOFHaloHDF5(IOHandlerGadgetFOFHDF5):
_dataset_type = "gadget_fof_halo_hdf5"
- def __init__(self, ds):
- super(IOHandlerGadgetFOFHaloHDF5, self).__init__(ds)
-
- def _read_fluid_selection(self, chunks, selector, fields, size):
- raise NotImplementedError
-
def _read_particle_coords(self, chunks, ptf):
pass
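
The file count now comes from the header rather than a filesystem glob; in
sketch form (path illustrative):

    import h5py

    with h5py.File("fof_subhalo_tab_005.0.hdf5", "r") as f:
        parameters = dict((str(k), v) for k, v in f["Header"].attrs.items())
    file_count = parameters["NumFiles"]   # replaces len(glob.glob(...))
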
https://bitbucket.org/yt_analysis/yt/commits/9d2cc1a57cc8/
Changeset: 9d2cc1a57cc8
Branch: yt
User: brittonsmith
Date: 2015-12-22 22:02:45+00:00
Summary: Fixing a typo and cleaning things up a bit.
Affected #: 1 file
diff -r 6e3fda884e84134592bebdc350bdca9dacea116f -r 9d2cc1a57cc87195d3537d832bf6edf14faf88a6 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -24,26 +24,24 @@
from yt.data_objects.data_containers import \
YTSelectionContainer
+from yt.data_objects.static_output import \
+ Dataset, \
+ ParticleFile
+from yt.frontends.gadget.data_structures import \
+ _fix_unit_ordering
from yt.frontends.gadget_fof.fields import \
GadgetFOFFieldInfo, \
GadgetFOFHaloFieldInfo
from yt.geometry.geometry_handler import \
Index
-
+from yt.geometry.particle_geometry_handler import \
+ ParticleIndex
from yt.utilities.cosmology import \
Cosmology
from yt.utilities.exceptions import \
YTException
from yt.utilities.logger import ytLogger as \
mylog
-from yt.geometry.particle_geometry_handler import \
- ParticleIndex
-from yt.data_objects.static_output import \
- Dataset, \
- ParticleFile
-from yt.frontends.gadget.data_structures import \
- _fix_unit_ordering
-
class GadgetFOFParticleIndex(ParticleIndex):
def __init__(self, ds, dataset_type):
@@ -98,7 +96,7 @@
super(GadgetFOFParticleIndex, self)._setup_geometry()
self._calculate_particle_index_starts()
self._calculate_file_offset_map()
-
+
class GadgetFOFHDF5File(ParticleFile):
def __init__(self, ds, io, filename, file_id):
with h5py.File(filename, "r") as f:
@@ -267,10 +265,11 @@
template = self.real_ds.filename_template
ndoms = self.real_ds.file_count
cls = self.real_ds._file_class
- self.data_files = [cls(self.dataset, self.io, template % {'num':i}, i)
- for i in range(ndoms)]
+ self.data_files = \
+ [cls(self.dataset, self.io, template % {'num':i}, i)
+ for i in range(ndoms)]
else:
- self.data_files = self.read_ds.index.data_files
+ self.data_files = self.real_ds.index.data_files
self._calculate_particle_index_starts()
self._create_halo_id_table()
https://bitbucket.org/yt_analysis/yt/commits/828f213e749a/
Changeset: 828f213e749a
Branch: yt
User: brittonsmith
Date: 2015-12-22 23:23:03+00:00
Summary: Correcting halo index calculation now that we know that a halo and its ids are not necessarily in the same file, which is such a joy.
Affected #: 1 file
diff -r 9d2cc1a57cc87195d3537d832bf6edf14faf88a6 -r 828f213e749a0108fd5fa72b91e53a0f0c58eb19 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -47,6 +47,12 @@
def __init__(self, ds, dataset_type):
super(GadgetFOFParticleIndex, self).__init__(ds, dataset_type)
+ def _calculate_particle_count(self):
+ "Calculate the total number of each type of particle."
+ self.particle_count = \
+ dict([(ptype, sum([d.total_particles[ptype] for d in self.data_files]))
+ for ptype in self.ds.particle_types_raw])
+
def _calculate_particle_index_starts(self):
# Halo indices are not saved in the file, so we must count by hand.
# File 0 has halos 0 to N_0 - 1, file 1 has halos N_0 to N_0 + N_1 - 1, etc.
@@ -94,6 +100,7 @@
def _setup_geometry(self):
super(GadgetFOFParticleIndex, self)._setup_geometry()
+ self._calculate_particle_count()
self._calculate_particle_index_starts()
self._calculate_file_offset_map()
@@ -103,7 +110,8 @@
self.header = \
dict((str(field), val)
for field, val in f["Header"].attrs.items())
- self.total_ids = {"Group": self.header["Nids_ThisFile"]}
+ self.total_ids = {"Group": self.header["Nids_ThisFile"],
+ "Subhalo": 0} # subhalos not yet supported
self.total_particles = \
{"Group": self.header["Ngroups_ThisFile"],
"Subhalo": self.header["Nsubgroups_ThisFile"]}
@@ -271,24 +279,29 @@
else:
self.data_files = self.real_ds.index.data_files
- self._calculate_particle_index_starts()
+ self._calculate_particle_count()
self._create_halo_id_table()
def _create_halo_id_table(self):
"""
Create a list of halo start ids so we know which file
- contains particles for a given halo.
+ contains particles for a given halo. Note, the halo ids
+ are distributed over all files and so the ids for a given
+ halo are likely stored in a different file than the halo
+ itself.
"""
- self._halo_index_start = \
- dict([(ptype, np.array([dom.index_start[ptype]
+ all_ids = \
+ dict([(ptype, np.array([dom.total_ids[ptype]
for dom in self.data_files]))
for ptype in self.ds.particle_types_raw])
+
self._halo_index_end = \
- dict([(ptype, np.array([dom.total_particles[ptype]
- for dom in self.data_files]) +
- self._halo_index_start[ptype])
- for ptype in self.ds.particle_types_raw])
+ dict([(ptype, val.cumsum())
+ for ptype, val in all_ids.items()])
+ self._halo_index_start = \
+ dict([(ptype, val - all_ids[ptype])
+ for ptype, val in self._halo_index_end.items()])
def _detect_output_fields(self):
dsl = []
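
The corrected id table is a cumulative sum over per-file id counts. A sketch
with made-up counts:

    import numpy as np

    ids_per_file = np.array([100, 80, 120])   # Nids_ThisFile per file
    id_end = ids_per_file.cumsum()            # [100, 180, 300]
    id_start = id_end - ids_per_file          # [0, 100, 180]
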
https://bitbucket.org/yt_analysis/yt/commits/efea2cdc4daa/
Changeset: efea2cdc4daa
Branch: yt
User: brittonsmith
Date: 2015-12-23 13:33:34+00:00
Summary: Now we are correctly calculating the files in which halo particles live.
Affected #: 1 file
diff -r 828f213e749a0108fd5fa72b91e53a0f0c58eb19 -r efea2cdc4daa5b6548455952583e57992017874f yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -66,6 +66,16 @@
particle_count[ptype] += data_file.total_particles[ptype]
offset_count += data_file.total_offset
+ self._halo_index_start = \
+ dict([(ptype, np.array([dom.index_start[ptype]
+ for dom in self.data_files]))
+ for ptype in self.ds.particle_types_raw])
+ self._halo_index_end = \
+ dict([(ptype, np.array([dom.total_particles[ptype]
+ for dom in self.data_files]) +
+ self._halo_index_start[ptype])
+ for ptype in self.ds.particle_types_raw])
+
def _calculate_file_offset_map(self):
# After the FOF is performed, a load-balancing step redistributes halos
# and then writes more fields. Here, for each file, we create a list of
@@ -279,6 +289,7 @@
else:
self.data_files = self.real_ds.index.data_files
+ self._calculate_particle_index_starts()
self._calculate_particle_count()
self._create_halo_id_table()
@@ -296,12 +307,12 @@
for dom in self.data_files]))
for ptype in self.ds.particle_types_raw])
- self._halo_index_end = \
+ self._halo_id_end = \
dict([(ptype, val.cumsum())
for ptype, val in all_ids.items()])
- self._halo_index_start = \
+ self._halo_id_start = \
dict([(ptype, val - all_ids[ptype])
- for ptype, val in self._halo_index_end.items()])
+ for ptype, val in self._halo_id_end.items()])
def _detect_output_fields(self):
dsl = []
@@ -378,27 +389,40 @@
self.particle_identifier = particle_identifier
super(GagdetFOFHaloContainer, self).__init__(ds, {})
- # Find the file that has the data for this halo.
- if self.particle_identifier >= self.ds.index._halo_index_end[ptype][-1]:
- raise RuntimeError("Halo %d is out of bounds of catalog with %d halos." %
- (self.particle_identifier,
- self.ds.index._halo_index_end[ptype][-1]))
- file_index = \
- (self.particle_identifier >=
- self.ds.index._halo_index_start[ptype]) & \
- (self.particle_identifier <
- self.ds.index._halo_index_end[ptype])
- self.data_file = self.ds.index.data_files[np.where(file_index)[0]]
+ if self.particle_identifier >= self.index.particle_count[ptype]:
+ raise RuntimeError("%s %d requested, but only %d %s objects exist." %
+ (ptype, particle_identifier,
+ self.index.particle_count[ptype], ptype))
+
+ # Find the file that has the scalar values for this halo.
+ i_scalar = np.digitize([particle_identifier],
+ self.index._halo_index_start[ptype],
+ right=False)[0] - 1
# index within halo arrays that corresponds to this halo
self.scalar_index = self.particle_identifier - \
- self.ds.index._halo_index_start[ptype][file_index][0]
+ self.ds.index._halo_index_start[ptype][i_scalar]
+ self.scalar_data_file = self.ds.index.data_files[i_scalar]
- # find start and end index for halo member particles
- with h5py.File(self.data_file.filename, "r") as f:
+ # Find the files that have the member particles for this halo.
+ all_id_start = 0
+ for d in self.index.data_files[:i_scalar]:
+ with h5py.File(d.filename, "r") as f:
+ all_id_start += int(f[ptype]["GroupLen"].value.sum())
+
+ with h5py.File(self.scalar_data_file.filename, "r") as f:
halo_size = f[ptype]["GroupLen"].value
+ all_id_start += int(halo_size[:self.scalar_index].sum())
self.psize = halo_size[self.scalar_index]
- self.field_index = halo_size[:self.scalar_index].sum()
+
+ i_start = np.digitize([all_id_start],
+ self.index._halo_id_start[ptype],
+ right=False)[0] - 1
+ i_end = np.digitize([all_id_start+self.psize],
+ self.index._halo_id_end[ptype],
+ right=True)[0]
+ self.field_data_files = self.ds.index.data_files[i_start:i_end+1]
+ self.all_id_start = all_id_start
def __repr__(self):
return "%s_%s_%09d" % \
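
np.digitize replaces the earlier boolean-mask search: against the start
boundaries with right=False, the bin index minus one is the file holding a
given halo. A sketch with made-up boundaries:

    import numpy as np

    start = np.array([0, 100, 180])    # first halo id in each file
    np.digitize([135], start, right=False)[0] - 1   # -> 1 (file 1)
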
https://bitbucket.org/yt_analysis/yt/commits/3bc1d8b9d0c0/
Changeset: 3bc1d8b9d0c0
Branch: yt
User: brittonsmith
Date: 2015-12-23 14:35:58+00:00
Summary: Updating io routine. Now this works.
Affected #: 2 files
diff -r efea2cdc4daa5b6548455952583e57992017874f -r 3bc1d8b9d0c0aa2baf89ae9a0723317a3cc5aaaa yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -70,11 +70,6 @@
dict([(ptype, np.array([dom.index_start[ptype]
for dom in self.data_files]))
for ptype in self.ds.particle_types_raw])
- self._halo_index_end = \
- dict([(ptype, np.array([dom.total_particles[ptype]
- for dom in self.data_files]) +
- self._halo_index_start[ptype])
- for ptype in self.ds.particle_types_raw])
def _calculate_file_offset_map(self):
# After the FOF is performed, a load-balancing step redistributes halos
@@ -401,13 +396,13 @@
# index within halo arrays that corresponds to this halo
self.scalar_index = self.particle_identifier - \
- self.ds.index._halo_index_start[ptype][i_scalar]
- self.scalar_data_file = self.ds.index.data_files[i_scalar]
+ self.index._halo_index_start[ptype][i_scalar]
+ self.scalar_data_file = self.index.data_files[i_scalar]
# Find the files that have the member particles for this halo.
all_id_start = 0
- for d in self.index.data_files[:i_scalar]:
- with h5py.File(d.filename, "r") as f:
+ for data_file in self.index.data_files[:i_scalar]:
+ with h5py.File(data_file.filename, "r") as f:
all_id_start += int(f[ptype]["GroupLen"].value.sum())
with h5py.File(self.scalar_data_file.filename, "r") as f:
@@ -421,7 +416,14 @@
i_end = np.digitize([all_id_start+self.psize],
self.index._halo_id_end[ptype],
right=True)[0]
- self.field_data_files = self.ds.index.data_files[i_start:i_end+1]
+ self.field_data_files = self.index.data_files[i_start:i_end+1]
+ self.field_data_start = \
+ (all_id_start -
+ self.index._halo_id_start[ptype][i_start:i_end+1]).clip(min=0)
+ self.field_data_end = \
+ (all_id_start + self.psize -
+ self.index._halo_id_start[ptype][i_start:i_end+1]).clip(
+ max=self.index._halo_id_end[ptype][i_start:i_end+1])
self.all_id_start = all_id_start
def __repr__(self):
diff -r efea2cdc4daa5b6548455952583e57992017874f -r 3bc1d8b9d0c0aa2baf89ae9a0723317a3cc5aaaa yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -226,28 +226,35 @@
return rv
def _read_particle_fields(self, dobj, ptf):
- data_file = dobj.data_file
- start_index = dobj.field_index
- end_index = start_index + dobj.psize
- with h5py.File(data_file.filename, "r") as f:
- for ptype, field_list in sorted(ptf.items()):
- pcount = dobj.psize
- if pcount == 0: continue
- for field in field_list:
- if field in f["IDs"]:
- field_data = \
- f["IDs"][field][start_index:end_index].astype("float64")
- else:
- fname = field[:field.rfind("_")]
- field_data = \
- f["IDs"][fname][start_index:end_index].astype("float64")
- my_div = field_data.size / pcount
- if my_div > 1:
- field_data = np.resize(field_data, (pcount, my_div))
- findex = int(field[field.rfind("_") + 1:])
- field_data = field_data[:, findex]
- data = field_data
- yield (ptype, field), data
+ def field_array():
+ return np.empty(dobj.psize, dtype=np.float64)
+ data = defaultdict(field_array)
+ field_start = 0
+ for i, data_file in enumerate(dobj.field_data_files):
+ start_index = dobj.field_data_start[i]
+ end_index = dobj.field_data_end[i]
+ field_end = field_start + end_index - start_index
+ with h5py.File(data_file.filename, "r") as f:
+ for ptype, field_list in sorted(ptf.items()):
+ for field in field_list:
+ field_data = data[(ptype, field)]
+ if field in f["IDs"]:
+ my_data = \
+ f["IDs"][field][start_index:end_index].astype("float64")
+ else:
+ fname = field[:field.rfind("_")]
+ my_data = \
+ f["IDs"][fname][start_index:end_index].astype("float64")
+ my_div = my_data.size / pcount
+ if my_div > 1:
+ my_data = np.resize(my_data, (pcount, my_div))
+ findex = int(field[field.rfind("_") + 1:])
+ my_data = my_data[:, findex]
+ field_data[field_start:field_end] = my_data
+ field_start = field_end
+
+ for field, field_data in data.items():
+ yield field, field_data
def _identify_fields(self, data_file):
fields = []
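
A simplified sketch of the per-file window arithmetic above, clipping the
halo's member-id range against each file's id boundaries (boundaries and
halo values made up; the real code keeps the same quantities on the
container):

    import numpy as np

    id_start = np.array([0, 100, 180])   # first id stored in each file
    id_end = np.array([100, 180, 300])   # one past the last id in each file
    a, n = 150, 80                       # halo's first member id and size

    i0 = np.digitize([a], id_start, right=False)[0] - 1      # -> 1
    i1 = np.digitize([a + n], id_end, right=True)[0]         # -> 2
    starts = (a - id_start[i0:i1 + 1]).clip(min=0)           # [50, 0]
    ends = (a + n - id_start[i0:i1 + 1]).clip(
        max=id_end[i0:i1 + 1] - id_start[i0:i1 + 1])         # [80, 50]
    # file 1 supplies ids[50:80], file 2 supplies ids[0:50]
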
https://bitbucket.org/yt_analysis/yt/commits/847a1374ec8d/
Changeset: 847a1374ec8d
Branch: yt
User: brittonsmith
Date: 2015-12-23 14:54:59+00:00
Summary: Import cleanup.
Affected #: 1 file
diff -r 3bc1d8b9d0c0aa2baf89ae9a0723317a3cc5aaaa -r 847a1374ec8dbb31059b1130ac423e7fbfb7958b yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -15,17 +15,16 @@
#-----------------------------------------------------------------------------
from collections import defaultdict
-
from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
-from yt.utilities.exceptions import YTDomainOverflow
from yt.funcs import mylog
-
+from yt.utilities.exceptions import \
+ YTDomainOverflow
from yt.utilities.io_handler import \
BaseIOHandler
-
-from yt.utilities.lib.geometry_utils import compute_morton
+from yt.utilities.lib.geometry_utils import \
+ compute_morton
class IOHandlerGadgetFOFHDF5(BaseIOHandler):
_dataset_type = "gadget_fof_hdf5"
https://bitbucket.org/yt_analysis/yt/commits/c61ccff3ed23/
Changeset: c61ccff3ed23
Branch: yt
User: brittonsmith
Date: 2015-12-23 16:46:17+00:00
Summary: Don't loop over all files to find fields.
Affected #: 1 file
diff -r 847a1374ec8dbb31059b1130ac423e7fbfb7958b -r c61ccff3ed2316252333953f1ce81c1196fab369 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -67,8 +67,8 @@
offset_count += data_file.total_offset
self._halo_index_start = \
- dict([(ptype, np.array([dom.index_start[ptype]
- for dom in self.data_files]))
+ dict([(ptype, np.array([data_file.index_start[ptype]
+ for data_file in self.data_files]))
for ptype in self.ds.particle_types_raw])
def _calculate_file_offset_map(self):
@@ -89,19 +89,25 @@
def _detect_output_fields(self):
dsl = []
units = {}
- for dom in self.data_files:
- fl, _units = self.io._identify_fields(dom)
+ found_fields = \
+ dict([(ptype, False)
+ for ptype, pnum in self.particle_count.items()
+ if pnum > 0])
+
+ for data_file in self.data_files:
+ fl, _units = self.io._identify_fields(data_file)
units.update(_units)
- dom._calculate_offsets(fl)
- for f in fl:
- if f not in dsl: dsl.append(f)
+ data_file._calculate_offsets(fl)
+ dsl.extend([f for f in fl if f not in dsl])
+ for ptype in found_fields:
+ found_fields[ptype] |= data_file.total_particles[ptype]
+ if all(found_fields.values()): break
+
self.field_list = dsl
ds = self.dataset
ds.particle_types = tuple(set(pt for pt, ds in dsl))
- # This is an attribute that means these particle types *actually*
- # exist. As in, they are real, in the dataset.
ds.field_units.update(units)
- ds.particle_types_raw = tuple(sorted(ds.particle_types))
+ ds.particle_types_raw = ds.particle_types
def _setup_geometry(self):
super(GadgetFOFParticleIndex, self)._setup_geometry()
@@ -298,8 +304,8 @@
"""
all_ids = \
- dict([(ptype, np.array([dom.total_ids[ptype]
- for dom in self.data_files]))
+ dict([(ptype, np.array([data_file.total_ids[ptype]
+ for data_file in self.data_files]))
for ptype in self.ds.particle_types_raw])
self._halo_id_end = \
@@ -312,10 +318,19 @@
def _detect_output_fields(self):
dsl = []
units = {}
- for dom in self.data_files:
- if sum(dom.total_particles.values()) > 0:
- dsl, units = self.io._identify_fields(dom)
- break
+ found_fields = \
+ dict([(ptype, False)
+ for ptype, pnum in self.particle_count.items()
+ if pnum > 0])
+
+ for data_file in self.data_files:
+ fl, _units = self.io._identify_fields(data_file)
+ units.update(_units)
+ dsl.extend([f for f in fl if f not in dsl])
+ for ptype in found_fields:
+ found_fields[ptype] |= data_file.total_particles[ptype]
+ if all(found_fields.values()): break
+
self.field_list = dsl
ds = self.dataset
ds.particle_types = tuple(set(pt for pt, ds in dsl))
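
The loop above can stop early because it only needs one file containing each
non-empty particle type. A condensed sketch, assuming identify_fields(data_file)
returns (field_list, units) and data_file.total_particles maps particle types
to counts:

def detect_fields(data_files, identify_fields, particle_count):
    field_list, units = [], {}
    found = {ptype: False
             for ptype, pnum in particle_count.items() if pnum > 0}
    for data_file in data_files:
        fl, _units = identify_fields(data_file)
        units.update(_units)
        field_list.extend(f for f in fl if f not in field_list)
        for ptype in found:
            found[ptype] |= data_file.total_particles[ptype] > 0
        # every non-empty particle type seen: no need to open more files
        if all(found.values()):
            break
    return field_list, units
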
https://bitbucket.org/yt_analysis/yt/commits/bb1bd54449f2/
Changeset: bb1bd54449f2
Branch: yt
User: brittonsmith
Date: 2015-12-23 20:32:00+00:00
Summary: Detecting regular fields and scalar fields.
Affected #: 3 files
diff -r c61ccff3ed2316252333953f1ce81c1196fab369 -r bb1bd54449f2d92386c43c99b5e60896b366baff yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -87,7 +87,7 @@
data_file.offset_files = self.data_files[istart[i]: iend[i] + 1]
def _detect_output_fields(self):
- dsl = []
+ field_list = []
units = {}
found_fields = \
dict([(ptype, False)
@@ -97,15 +97,14 @@
for data_file in self.data_files:
fl, _units = self.io._identify_fields(data_file)
units.update(_units)
- data_file._calculate_offsets(fl)
- dsl.extend([f for f in fl if f not in dsl])
+ field_list.extend([f for f in fl if f not in field_list])
for ptype in found_fields:
found_fields[ptype] |= data_file.total_particles[ptype]
if all(found_fields.values()): break
- self.field_list = dsl
+ self.field_list = field_list
ds = self.dataset
- ds.particle_types = tuple(set(pt for pt, ds in dsl))
+ ds.particle_types = tuple(set(pt for pt, ds in field_list))
ds.field_units.update(units)
ds.particle_types_raw = ds.particle_types
@@ -316,7 +315,8 @@
for ptype, val in self._halo_id_end.items()])
def _detect_output_fields(self):
- dsl = []
+ field_list = []
+ scalar_field_list = []
units = {}
found_fields = \
dict([(ptype, False)
@@ -324,16 +324,20 @@
if pnum > 0])
for data_file in self.data_files:
- fl, _units = self.io._identify_fields(data_file)
+ fl, sl, _units = self.io._identify_fields(data_file)
units.update(_units)
- dsl.extend([f for f in fl if f not in dsl])
+ field_list.extend([f for f in fl
+ if f not in field_list])
+ scalar_field_list.extend([f for f in sl
+ if f not in scalar_field_list])
for ptype in found_fields:
found_fields[ptype] |= data_file.total_particles[ptype]
if all(found_fields.values()): break
- self.field_list = dsl
+ self.field_list = field_list
+ self.scalar_field_list = scalar_field_list
ds = self.dataset
- ds.particle_types = tuple(set(pt for pt, ds in dsl))
+ ds.particle_types = tuple(set(pt for pt, ds in field_list))
ds.field_units.update(units)
ds.particle_types_raw = ds.particle_types
diff -r c61ccff3ed2316252333953f1ce81c1196fab369 -r bb1bd54449f2d92386c43c99b5e60896b366baff yt/frontends/gadget_fof/fields.py
--- a/yt/frontends/gadget_fof/fields.py
+++ b/yt/frontends/gadget_fof/fields.py
@@ -62,5 +62,5 @@
)
known_particle_fields = (
- ("ID", ("", ["Group", "particle_identifier"], None)),
+ ("ID", ("", ["Group", "member_ids"], None)),
)
diff -r c61ccff3ed2316252333953f1ce81c1196fab369 -r bb1bd54449f2d92386c43c99b5e60896b366baff yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -257,16 +257,22 @@
def _identify_fields(self, data_file):
fields = []
+ scalar_fields = []
pcount = data_file.total_particles
- if sum(pcount.values()) == 0: return fields, {}
+ if sum(pcount.values()) == 0: return fields, scalar_fields, {}
with h5py.File(data_file.filename, "r") as f:
- if "IDs" not in f:
- return fields, {}
for ptype in self.ds.particle_types_raw:
+ if data_file.total_particles[ptype] == 0: continue
+ fields.append((ptype, "particle_identifier"))
+ my_fields, my_offset_fields = \
+ subfind_field_list(f[ptype], ptype, data_file.total_particles)
+ fields.extend(my_fields)
+ scalar_fields.extend(my_fields)
+
+ if "IDs" not in f: continue
if ptype == "Subhalo": continue
- if data_file.total_particles[ptype] == 0: continue
fields.extend([(ptype, field) for field in f["IDs"]])
- return fields, {}
+ return fields, scalar_fields, {}
def subfind_field_list(fh, ptype, pcount):
fields = []
@@ -286,12 +292,12 @@
fields.append((ptype, "%s_%d" % (fname, i)))
else:
fields.append((ptype, fname))
- elif ptype == "Subfind" and \
- not fh[field].size % fh["/Subfind"].attrs["Number_of_groups"]:
+ elif ptype == "Subhalo" and \
+ not fh[field].size % fh["/Subhalo"].attrs["Number_of_groups"]:
# These are actually Group fields, but they were written after
# a load balancing step moved halos around and thus they do not
# correspond to the halos stored in the Group group.
- my_div = fh[field].size / fh["/Subfind"].attrs["Number_of_groups"]
+ my_div = fh[field].size / fh["/Subhalo"].attrs["Number_of_groups"]
fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
if my_div > 1:
for i in range(int(my_div)):
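
subfind_field_list infers vector fields from dataset sizes: a dataset whose
length is d times the halo count is split into d component fields. A small
sketch of that naming rule (all arguments hypothetical):

def component_field_names(ptype, fname, size, count):
    # size == count -> one scalar field; size == d * count -> d components
    my_div = size // count
    if my_div > 1:
        return [(ptype, "%s_%d" % (fname, i)) for i in range(my_div)]
    return [(ptype, fname)]

# e.g. a GroupPos dataset of length 3 * Ngroups yields GroupPos_0..2
print(component_field_names("Group", "GroupPos", 300, 100))
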
https://bitbucket.org/yt_analysis/yt/commits/3461c07a6c25/
Changeset: 3461c07a6c25
Branch: yt
User: brittonsmith
Date: 2015-12-23 21:45:05+00:00
Summary: Adding ability to query scalar fields from halo containers.
Affected #: 3 files
diff -r bb1bd54449f2d92386c43c99b5e60896b366baff -r 3461c07a6c25819e526255d4c8432ef4ffb4e439 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -337,6 +337,7 @@
self.field_list = field_list
self.scalar_field_list = scalar_field_list
ds = self.dataset
+ ds.scalar_field_list = scalar_field_list
ds.particle_types = tuple(set(pt for pt, ds in field_list))
ds.field_units.update(units)
ds.particle_types_raw = ds.particle_types
diff -r bb1bd54449f2d92386c43c99b5e60896b366baff -r 3461c07a6c25819e526255d4c8432ef4ffb4e439 yt/frontends/gadget_fof/fields.py
--- a/yt/frontends/gadget_fof/fields.py
+++ b/yt/frontends/gadget_fof/fields.py
@@ -62,5 +62,21 @@
)
known_particle_fields = (
+ ("GroupPos_0", (p_units, ["Group", "particle_position_x"], None)),
+ ("GroupPos_1", (p_units, ["Group", "particle_position_y"], None)),
+ ("GroupPos_2", (p_units, ["Group", "particle_position_z"], None)),
+ ("GroupVel_0", (v_units, ["Group", "particle_velocity_x"], None)),
+ ("GroupVel_1", (v_units, ["Group", "particle_velocity_y"], None)),
+ ("GroupVel_2", (v_units, ["Group", "particle_velocity_z"], None)),
+ ("GroupMass", (m_units, ["Group", "particle_mass"], None)),
+ ("GroupLen", ("", ["Group", "particle_number"], None)),
+ ("SubhaloPos_0", (p_units, ["Subhalo", "particle_position_x"], None)),
+ ("SubhaloPos_1", (p_units, ["Subhalo", "particle_position_y"], None)),
+ ("SubhaloPos_2", (p_units, ["Subhalo", "particle_position_z"], None)),
+ ("SubhaloVel_0", (v_units, ["Subhalo", "particle_velocity_x"], None)),
+ ("SubhaloVel_1", (v_units, ["Subhalo", "particle_velocity_y"], None)),
+ ("SubhaloVel_2", (v_units, ["Subhalo", "particle_velocity_z"], None)),
+ ("SubhaloMass", (m_units, ["Subhalo", "particle_mass"], None)),
+ ("SubhaloLen", ("", ["Subhalo", "particle_number"], None)),
("ID", ("", ["Group", "member_ids"], None)),
)
diff -r bb1bd54449f2d92386c43c99b5e60896b366baff -r 3461c07a6c25819e526255d4c8432ef4ffb4e439 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -207,6 +207,8 @@
shape = (fsize[field], self._vector_fields[field[1]])
elif field[1] in self._array_fields:
shape = (fsize[field],)+self._array_fields[field[1]]
+ elif field in self.ds.scalar_field_list:
+ shape = (1,)
else:
shape = (fsize[field], )
rv[field] = np.empty(shape, dtype="float64")
@@ -224,19 +226,44 @@
rv[field_f] = rv[field_f][:ind[field_f]]
return rv
- def _read_particle_fields(self, dobj, ptf):
+ def _read_scalar_fields(self, dobj, scalar_fields):
+ all_data = {}
+ if not scalar_fields: return all_data
+ with h5py.File(dobj.scalar_data_file.filename, "r") as f:
+ for ptype, field_list in sorted(scalar_fields.items()):
+ for field in field_list:
+ if field == "particle_identifier":
+ field_data = \
+ np.arange(dobj.scalar_data_file.total_particles[ptype]) + \
+ dobj.scalar_data_file.index_start[ptype]
+ elif field in f[ptype]:
+ field_data = f[ptype][field].value.astype("float64")
+ else:
+ fname = field[:field.rfind("_")]
+ field_data = f[ptype][fname].value.astype("float64")
+ my_div = field_data.size / pcount
+ if my_div > 1:
+ field_data = np.resize(field_data, (pcount, my_div))
+ findex = int(field[field.rfind("_") + 1:])
+ field_data = field_data[:, findex]
+ data = np.array([field_data[dobj.scalar_index]])
+ all_data[(ptype, field)] = data
+ return all_data
+
+ def _read_member_fields(self, dobj, member_fields):
def field_array():
return np.empty(dobj.psize, dtype=np.float64)
- data = defaultdict(field_array)
+ all_data = defaultdict(field_array)
+ if not member_fields: return all_data
field_start = 0
for i, data_file in enumerate(dobj.field_data_files):
start_index = dobj.field_data_start[i]
end_index = dobj.field_data_end[i]
field_end = field_start + end_index - start_index
with h5py.File(data_file.filename, "r") as f:
- for ptype, field_list in sorted(ptf.items()):
+ for ptype, field_list in sorted(member_fields.items()):
for field in field_list:
- field_data = data[(ptype, field)]
+ field_data = all_data[(ptype, field)]
if field in f["IDs"]:
my_data = \
f["IDs"][field][start_index:end_index].astype("float64")
@@ -251,8 +278,23 @@
my_data = my_data[:, findex]
field_data[field_start:field_end] = my_data
field_start = field_end
+ return all_data
- for field, field_data in data.items():
+ def _read_particle_fields(self, dobj, ptf):
+ # separate member particle fields from scalar fields
+ scalar_fields = defaultdict(list)
+ member_fields = defaultdict(list)
+ for ptype, field_list in sorted(ptf.items()):
+ for field in field_list:
+ if (ptype, field) in self.ds.scalar_field_list:
+ scalar_fields[ptype].append(field)
+ else:
+ member_fields[ptype].append(field)
+
+ all_data = self._read_scalar_fields(dobj, scalar_fields)
+ all_data.update(self._read_member_fields(dobj, member_fields))
+
+ for field, field_data in all_data.items():
yield field, field_data
def _identify_fields(self, data_file):
@@ -264,6 +306,7 @@
for ptype in self.ds.particle_types_raw:
if data_file.total_particles[ptype] == 0: continue
fields.append((ptype, "particle_identifier"))
+ scalar_fields.append((ptype, "particle_identifier"))
my_fields, my_offset_fields = \
subfind_field_list(f[ptype], ptype, data_file.total_particles)
fields.extend(my_fields)
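
With this change, _read_particle_fields is essentially a dispatcher: it
buckets requested fields into scalar (one value per halo) and member (one
value per particle) groups, then merges the two readers' results. Condensed
from the diff, with ptf mapping particle types to field-name lists:

from collections import defaultdict

def split_requested_fields(ptf, scalar_field_list):
    scalar_fields = defaultdict(list)
    member_fields = defaultdict(list)
    for ptype, field_list in sorted(ptf.items()):
        for field in field_list:
            if (ptype, field) in scalar_field_list:
                scalar_fields[ptype].append(field)
            else:
                member_fields[ptype].append(field)
    return scalar_fields, member_fields
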
https://bitbucket.org/yt_analysis/yt/commits/6510ac79e7e4/
Changeset: 6510ac79e7e4
Branch: yt
User: brittonsmith
Date: 2015-12-23 21:51:19+00:00
Summary: Making sure to inherit unit registry and fixing small bug in reading scalar fields.
Affected #: 2 files
diff -r 3461c07a6c25819e526255d4c8432ef4ffb4e439 -r 6510ac79e7e44d30c1abc90558cbbce3dd59e3b5 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -383,6 +383,7 @@
"velocity", "magnetic", "temperature"]:
my_unit = "%s_unit" % unit
setattr(self, my_unit, getattr(self.real_ds, my_unit, None))
+ self.unit_registry = self.real_ds.unit_registry
def __repr__(self):
return "%s" % self.real_ds
diff -r 3461c07a6c25819e526255d4c8432ef4ffb4e439 -r 6510ac79e7e44d30c1abc90558cbbce3dd59e3b5 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -229,6 +229,7 @@
def _read_scalar_fields(self, dobj, scalar_fields):
all_data = {}
if not scalar_fields: return all_data
+ pcount = 1
with h5py.File(dobj.scalar_data_file.filename, "r") as f:
for ptype, field_list in sorted(scalar_fields.items()):
for field in field_list:
https://bitbucket.org/yt_analysis/yt/commits/2d89c6af97b8/
Changeset: 2d89c6af97b8
Branch: yt
User: brittonsmith
Date: 2015-12-23 21:56:33+00:00
Summary: Renaming variable.
Affected #: 2 files
diff -r 6510ac79e7e44d30c1abc90558cbbce3dd59e3b5 -r 2d89c6af97b8bfb1e12188de5a734b5361570e78 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -429,12 +429,12 @@
with h5py.File(self.scalar_data_file.filename, "r") as f:
halo_size = f[ptype]["GroupLen"].value
all_id_start += int(halo_size[:self.scalar_index].sum())
- self.psize = halo_size[self.scalar_index]
+ self.particle_number = halo_size[self.scalar_index]
i_start = np.digitize([all_id_start],
self.index._halo_id_start[ptype],
right=False)[0] - 1
- i_end = np.digitize([all_id_start+self.psize],
+ i_end = np.digitize([all_id_start+self.particle_number],
self.index._halo_id_end[ptype],
right=True)[0]
self.field_data_files = self.index.data_files[i_start:i_end+1]
@@ -442,7 +442,7 @@
(all_id_start -
self.index._halo_id_start[ptype][i_start:i_end+1]).clip(min=0)
self.field_data_end = \
- (all_id_start + self.psize -
+ (all_id_start + self.particle_number -
self.index._halo_id_start[ptype][i_start:i_end+1]).clip(
max=self.index._halo_id_end[ptype][i_start:i_end+1])
self.all_id_start = all_id_start
diff -r 6510ac79e7e44d30c1abc90558cbbce3dd59e3b5 -r 2d89c6af97b8bfb1e12188de5a734b5361570e78 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -195,7 +195,7 @@
field_maps[field].append(field)
# Now we allocate
- psize = {dobj.ptype: dobj.psize}
+ psize = {dobj.ptype: dobj.particle_number}
for field in fields:
if field[0] in unions:
for pt in unions[field[0]]:
@@ -253,7 +253,7 @@
def _read_member_fields(self, dobj, member_fields):
def field_array():
- return np.empty(dobj.psize, dtype=np.float64)
+ return np.empty(dobj.particle_number, dtype=np.float64)
all_data = defaultdict(field_array)
if not member_fields: return all_data
field_start = 0
https://bitbucket.org/yt_analysis/yt/commits/324e9c4cb497/
Changeset: 324e9c4cb497
Branch: yt
User: brittonsmith
Date: 2015-12-23 22:01:03+00:00
Summary: Adding mass, position, and velocity attributes for halo containers.
Affected #: 1 file
diff -r 2d89c6af97b8bfb1e12188de5a734b5361570e78 -r 324e9c4cb4976b5d25c63b6a06ba0f5955e3d172 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -447,6 +447,9 @@
max=self.index._halo_id_end[ptype][i_start:i_end+1])
self.all_id_start = all_id_start
+ for attr in ["mass", "position", "velocity"]:
+ setattr(self, attr, self[self.ptype, "particle_%s" % attr][0])
+
def __repr__(self):
return "%s_%s_%09d" % \
(self.ds, self.ptype, self.particle_identifier)
https://bitbucket.org/yt_analysis/yt/commits/68d7239294cf/
Changeset: 68d7239294cf
Branch: yt
User: brittonsmith
Date: 2015-12-28 22:09:55+00:00
Summary: Fleshing out field list for gadget_fof frontend.
Affected #: 1 file
diff -r 324e9c4cb4976b5d25c63b6a06ba0f5955e3d172 -r 68d7239294cfe7763a62de3b0ee70240b021a4d7 yt/frontends/gadget_fof/fields.py
--- a/yt/frontends/gadget_fof/fields.py
+++ b/yt/frontends/gadget_fof/fields.py
@@ -21,28 +21,70 @@
p_units = "code_length"
v_units = "code_velocity"
+_pnums = 6
+_type_fields = \
+ tuple(("%s%sType_%d" % (ptype, field, pnum), (units, [], None))
+ for pnum in range(_pnums)
+ for field, units in (("Mass", m_units), ("Len", p_units))
+ for ptype in ("Group", "Subhalo"))
+_sub_type_fields = \
+ tuple(("Subhalo%sType_%d" % (field, pnum), (units, [], None))
+ for pnum in range(_pnums)
+ for field, units in (("HalfmassRad", p_units),
+ ("MassInHalfRad", m_units),
+ ("MassInMaxRad", m_units),
+ ("MassInRad", m_units)))
+
+_particle_fields = (
+ ("GroupPos_0", (p_units, ["Group", "particle_position_x"], None)),
+ ("GroupPos_1", (p_units, ["Group", "particle_position_y"], None)),
+ ("GroupPos_2", (p_units, ["Group", "particle_position_z"], None)),
+ ("GroupVel_0", (v_units, ["Group", "particle_velocity_x"], None)),
+ ("GroupVel_1", (v_units, ["Group", "particle_velocity_y"], None)),
+ ("GroupVel_2", (v_units, ["Group", "particle_velocity_z"], None)),
+ ("GroupMass", (m_units, ["Group", "particle_mass"], None)),
+ ("GroupLen", ("", ["Group", "particle_number"], None)),
+ ("GroupNsubs", ("", ["Group", "subhalo_number"], None)),
+ ("GroupFirstSub", ("", [], None)),
+ ("Group_M_Crit200", (m_units, [], None)),
+ ("Group_M_Crit500", (m_units, [], None)),
+ ("Group_M_Mean200", (m_units, [], None)),
+ ("Group_M_TopHat200", (m_units, [], None)),
+ ("Group_R_Crit200", (p_units, [], None)),
+ ("Group_R_Crit500", (p_units, [], None)),
+ ("Group_R_Mean200", (p_units, [], None)),
+ ("Group_R_TopHat200", (p_units, [], None)),
+ ("SubhaloPos_0", (p_units, ["Subhalo", "particle_position_x"], None)),
+ ("SubhaloPos_1", (p_units, ["Subhalo", "particle_position_y"], None)),
+ ("SubhaloPos_2", (p_units, ["Subhalo", "particle_position_z"], None)),
+ ("SubhaloVel_0", (v_units, ["Subhalo", "particle_velocity_x"], None)),
+ ("SubhaloVel_1", (v_units, ["Subhalo", "particle_velocity_y"], None)),
+ ("SubhaloVel_2", (v_units, ["Subhalo", "particle_velocity_z"], None)),
+ ("SubhaloMass", (m_units, ["Subhalo", "particle_mass"], None)),
+ ("SubhaloLen", ("", ["Subhalo", "particle_number"], None)),
+ ("SubhaloCM_0", (p_units, ["Subhalo", "center_of_mass_x"], None)),
+ ("SubhaloCM_1", (p_units, ["Subhalo", "center_of_mass_y"], None)),
+ ("SubhaloCM_2", (p_units, ["Subhalo", "center_of_mass_z"], None)),
+ ("SubhaloSpin_0", ("", ["Subhalo", "spin_x"], None)),
+ ("SubhaloSpin_1", ("", ["Subhalo", "spin_y"], None)),
+ ("SubhaloSpin_2", ("", ["Subhalo", "spin_z"], None)),
+ ("SubhaloGrNr", ("", ["Subhalo", "group_identifier"], None)),
+ ("SubhaloHalfmassRad", (p_units, [], None)),
+ ("SubhaloIDMostbound", ("", [], None)),
+ ("SubhaloMassInHalfRad", (m_units, [], None)),
+ ("SubhaloMassInMaxRad", (m_units, [], None)),
+ ("SubhaloMassInRad", (m_units, [], None)),
+ ("SubhaloParent", ("", [], None)),
+ ("SubhaloVelDisp", (v_units, ["Subhalo", "velocity_dispersion"], None)),
+ ("SubhaloVmax", (v_units, [], None)),
+ ("SubhaloVmaxRad", (p_units, [], None)),
+) + _type_fields + _sub_type_fields
+
class GadgetFOFFieldInfo(FieldInfoContainer):
known_other_fields = (
)
- known_particle_fields = (
- ("GroupPos_0", (p_units, ["Group", "particle_position_x"], None)),
- ("GroupPos_1", (p_units, ["Group", "particle_position_y"], None)),
- ("GroupPos_2", (p_units, ["Group", "particle_position_z"], None)),
- ("GroupVel_0", (v_units, ["Group", "particle_velocity_x"], None)),
- ("GroupVel_1", (v_units, ["Group", "particle_velocity_y"], None)),
- ("GroupVel_2", (v_units, ["Group", "particle_velocity_z"], None)),
- ("GroupMass", (m_units, ["Group", "particle_mass"], None)),
- ("GroupLen", ("", ["Group", "particle_number"], None)),
- ("SubhaloPos_0", (p_units, ["Subhalo", "particle_position_x"], None)),
- ("SubhaloPos_1", (p_units, ["Subhalo", "particle_position_y"], None)),
- ("SubhaloPos_2", (p_units, ["Subhalo", "particle_position_z"], None)),
- ("SubhaloVel_0", (v_units, ["Subhalo", "particle_velocity_x"], None)),
- ("SubhaloVel_1", (v_units, ["Subhalo", "particle_velocity_y"], None)),
- ("SubhaloVel_2", (v_units, ["Subhalo", "particle_velocity_z"], None)),
- ("SubhaloMass", (m_units, ["Subhalo", "particle_mass"], None)),
- ("SubhaloLen", ("", ["Subhalo", "particle_number"], None)),
- )
+ known_particle_fields = _particle_fields
# these are extra fields to be created for the "all" particle type
extra_union_fields = (
@@ -61,22 +103,5 @@
known_other_fields = (
)
- known_particle_fields = (
- ("GroupPos_0", (p_units, ["Group", "particle_position_x"], None)),
- ("GroupPos_1", (p_units, ["Group", "particle_position_y"], None)),
- ("GroupPos_2", (p_units, ["Group", "particle_position_z"], None)),
- ("GroupVel_0", (v_units, ["Group", "particle_velocity_x"], None)),
- ("GroupVel_1", (v_units, ["Group", "particle_velocity_y"], None)),
- ("GroupVel_2", (v_units, ["Group", "particle_velocity_z"], None)),
- ("GroupMass", (m_units, ["Group", "particle_mass"], None)),
- ("GroupLen", ("", ["Group", "particle_number"], None)),
- ("SubhaloPos_0", (p_units, ["Subhalo", "particle_position_x"], None)),
- ("SubhaloPos_1", (p_units, ["Subhalo", "particle_position_y"], None)),
- ("SubhaloPos_2", (p_units, ["Subhalo", "particle_position_z"], None)),
- ("SubhaloVel_0", (v_units, ["Subhalo", "particle_velocity_x"], None)),
- ("SubhaloVel_1", (v_units, ["Subhalo", "particle_velocity_y"], None)),
- ("SubhaloVel_2", (v_units, ["Subhalo", "particle_velocity_z"], None)),
- ("SubhaloMass", (m_units, ["Subhalo", "particle_mass"], None)),
- ("SubhaloLen", ("", ["Subhalo", "particle_number"], None)),
- ("ID", ("", ["Group", "member_ids"], None)),
- )
+ known_particle_fields = _particle_fields + \
+ (("ID", ("", ["member_ids"], None)),)
https://bitbucket.org/yt_analysis/yt/commits/617b85e4fb58/
Changeset: 617b85e4fb58
Branch: yt
User: brittonsmith
Date: 2015-12-28 22:40:28+00:00
Summary: Removing the type-dependent particle ID count, since the IDs are all stored together.
Affected #: 1 file
diff -r 68d7239294cfe7763a62de3b0ee70240b021a4d7 -r 617b85e4fb58500ffd78b65e6ca3654da2b9b1fd yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -120,8 +120,7 @@
self.header = \
dict((str(field), val)
for field, val in f["Header"].attrs.items())
- self.total_ids = {"Group": self.header["Nids_ThisFile"],
- "Subhalo": 0} # subhalos not yet supported
+ self.total_ids = self.header["Nids_ThisFile"]
self.total_particles = \
{"Group": self.header["Ngroups_ThisFile"],
"Subhalo": self.header["Nsubgroups_ThisFile"]}
@@ -302,17 +301,11 @@
itself.
"""
- all_ids = \
- dict([(ptype, np.array([data_file.total_ids[ptype]
- for data_file in self.data_files]))
- for ptype in self.ds.particle_types_raw])
+ all_ids = np.array([data_file.total_ids
+ for data_file in self.data_files])
- self._halo_id_end = \
- dict([(ptype, val.cumsum())
- for ptype, val in all_ids.items()])
- self._halo_id_start = \
- dict([(ptype, val - all_ids[ptype])
- for ptype, val in self._halo_id_end.items()])
+ self._halo_id_end = all_ids.cumsum()
+ self._halo_id_start = self._halo_id_end - all_ids
def _detect_output_fields(self):
field_list = []
@@ -432,19 +425,19 @@
self.particle_number = halo_size[self.scalar_index]
i_start = np.digitize([all_id_start],
- self.index._halo_id_start[ptype],
+ self.index._halo_id_start,
right=False)[0] - 1
i_end = np.digitize([all_id_start+self.particle_number],
- self.index._halo_id_end[ptype],
+ self.index._halo_id_end,
right=True)[0]
self.field_data_files = self.index.data_files[i_start:i_end+1]
self.field_data_start = \
(all_id_start -
- self.index._halo_id_start[ptype][i_start:i_end+1]).clip(min=0)
+ self.index._halo_id_start[i_start:i_end+1]).clip(min=0)
self.field_data_end = \
(all_id_start + self.particle_number -
- self.index._halo_id_start[ptype][i_start:i_end+1]).clip(
- max=self.index._halo_id_end[ptype][i_start:i_end+1])
+ self.index._halo_id_start[i_start:i_end+1]).clip(
+ max=self.index._halo_id_end[i_start:i_end+1])
self.all_id_start = all_id_start
for attr in ["mass", "position", "velocity"]:
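
With a single ID stream per file, the per-file [start, end) ranges fall out
of a cumulative sum, and np.digitize then locates the file holding any global
particle index. A worked sketch with hypothetical per-file counts:

import numpy as np

total_ids = np.array([250, 300, 120])        # hypothetical Nids per file
halo_id_end = total_ids.cumsum()             # [250, 550, 670]
halo_id_start = halo_id_end - total_ids      # [  0, 250, 550]

# file containing global particle index 400: start[i] <= 400 < end[i]
i_file = np.digitize([400], halo_id_start, right=False)[0] - 1
print(i_file)   # -> 1
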
https://bitbucket.org/yt_analysis/yt/commits/0c0efcea9a68/
Changeset: 0c0efcea9a68
Branch: yt
User: brittonsmith
Date: 2015-12-29 01:53:47+00:00
Summary: Removing the resize of the vector field arrays since the shape is already correct.
Affected #: 1 file
diff -r 617b85e4fb58500ffd78b65e6ca3654da2b9b1fd -r 0c0efcea9a68c9e61c7a750ffde32ab78f418e55 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -107,7 +107,6 @@
field_data = f[ptype][fname].value.astype("float64")
my_div = field_data.size / pcount
if my_div > 1:
- field_data = np.resize(field_data, (pcount, my_div))
findex = int(field[field.rfind("_") + 1:])
field_data = field_data[:, findex]
data = field_data[mask]
@@ -244,7 +243,6 @@
field_data = f[ptype][fname].value.astype("float64")
my_div = field_data.size / pcount
if my_div > 1:
- field_data = np.resize(field_data, (pcount, my_div))
findex = int(field[field.rfind("_") + 1:])
field_data = field_data[:, findex]
data = np.array([field_data[dobj.scalar_index]])
@@ -274,7 +272,6 @@
f["IDs"][fname][start_index:end_index].astype("float64")
my_div = my_data.size / pcount
if my_div > 1:
- my_data = np.resize(my_data, (pcount, my_div))
findex = int(field[field.rfind("_") + 1:])
my_data = my_data[:, findex]
field_data[field_start:field_end] = my_data
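
The resize was redundant because the vector datasets already come back as
2-D (pcount, d) arrays, so the component index applies directly. A small
demonstration:

import numpy as np

pcount = 4
field_data = np.arange(12.0).reshape(pcount, 3)  # e.g. GroupVel, 4 halos
my_div = field_data.size // pcount               # == 3: a vector field
findex = 1                                       # component "GroupVel_1"
print(field_data[:, findex])                     # y-components, no resize
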
https://bitbucket.org/yt_analysis/yt/commits/61faeef46f5d/
Changeset: 61faeef46f5d
Branch: yt
User: brittonsmith
Date: 2015-12-29 03:35:44+00:00
Summary: Begin work on supporting subhalo containers. Also, precalculate some more helpful numbers to reduce file I/O, and set the current particle type for _determine_fields.
Affected #: 1 file
diff -r 0c0efcea9a68c9e61c7a750ffde32ab78f418e55 -r 61faeef46f5da3ad42608d8994b74bd19b9a128f yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -120,11 +120,15 @@
self.header = \
dict((str(field), val)
for field, val in f["Header"].attrs.items())
+ self.group_length_sum = f["Group/GroupLen"].value.sum() \
+ if "Group/GroupLen" in f else 0
+ self.group_subs_sum = f["Group/GroupNsubs"].value.sum() \
+ if "Group/GroupNsubs" in f else 0
self.total_ids = self.header["Nids_ThisFile"]
self.total_particles = \
{"Group": self.header["Ngroups_ThisFile"],
"Subhalo": self.header["Nsubgroups_ThisFile"]}
- self.total_offset = 0 # need to figure out how subfind works here
+ self.total_offset = 0 # I think this is no longer needed
super(GadgetFOFHDF5File, self).__init__(ds, io, filename, file_id)
class GadgetFOFDataset(Dataset):
@@ -307,6 +311,10 @@
self._halo_id_end = all_ids.cumsum()
self._halo_id_start = self._halo_id_end - all_ids
+ self._group_length_sum = \
+ np.array([data_file.group_length_sum
+ for data_file in self.data_files])
+
def _detect_output_fields(self):
field_list = []
scalar_field_list = []
@@ -391,10 +399,9 @@
if ptype not in ds.particle_types_raw:
raise RuntimeError("Possible halo types are %s, supplied \"%s\"." %
(ds.particle_types_raw, ptype))
- if ptype == "Subhalo":
- raise NotImplementedError()
self.ptype = ptype
+ self._current_particle_type = ptype
self.particle_identifier = particle_identifier
super(GagdetFOFHaloContainer, self).__init__(ds, {})
@@ -414,16 +421,17 @@
self.scalar_data_file = self.index.data_files[i_scalar]
# Find the files that have the member particles for this halo.
- all_id_start = 0
- for data_file in self.index.data_files[:i_scalar]:
- with h5py.File(data_file.filename, "r") as f:
- all_id_start += int(f[ptype]["GroupLen"].value.sum())
-
+ all_id_start = self.index._group_length_sum[:i_scalar].sum()
with h5py.File(self.scalar_data_file.filename, "r") as f:
- halo_size = f[ptype]["GroupLen"].value
+ halo_size = f[ptype]["%sLen" % ptype].value
all_id_start += int(halo_size[:self.scalar_index].sum())
self.particle_number = halo_size[self.scalar_index]
+ # If a subhalo, find the index of the parent
+ if ptype == "Subhalo":
+ self.group_identifier = \
+ f["Subhalo/SubhaloGrNr"][self.scalar_index]
+
i_start = np.digitize([all_id_start],
self.index._halo_id_start,
right=False)[0] - 1
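
Precalculating each file's summed GroupLen at indexing time removes the
per-halo loop over earlier files. A sketch of that one-time pass, assuming
hypothetical catalog filenames:

import numpy as np
import h5py

def precompute_group_lengths(filenames):
    sums = []
    for fn in filenames:
        with h5py.File(fn, "r") as f:
            # total member particles contributed by groups in this file
            sums.append(f["Group/GroupLen"][:].sum()
                        if "Group/GroupLen" in f else 0)
    return np.array(sums)
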
https://bitbucket.org/yt_analysis/yt/commits/27e07752d7dd/
Changeset: 27e07752d7dd
Branch: yt
User: brittonsmith
Date: 2015-12-29 17:08:44+00:00
Summary: Subhalo IDs now accessible.
Affected #: 2 files
diff -r 61faeef46f5da3ad42608d8994b74bd19b9a128f -r 27e07752d7dd2dc6bd95bd44a2fa56beb5217bd5 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -355,6 +355,42 @@
dobj, fields_to_read)
return fields_to_return, fields_to_generate
+ def _get_halo_values(self, ptype, identifiers, fields,
+ f=None):
+ """
+ Get field values for halos. IDs are likely to be
+ sequential (or at least monotonic), but not necessarily
+ all within the same file.
+
+ This does not do much to minimize file i/o, but with
+ halos randomly distributed across files, there's not
+ much more we can do.
+ """
+
+ # if a file is already open, don't open it again
+ filename = None if f is None \
+ else f.filename
+
+ data = defaultdict(lambda: np.empty(identifiers.size))
+ i_scalars = np.digitize(identifiers,
+ self._halo_index_start[ptype], right=False) - 1
+ for i_scalar in np.unique(i_scalars):
+ target = i_scalars == i_scalar
+ scalar_indices = identifiers - \
+ self._halo_index_start[ptype][i_scalar]
+
+ # only open file if it's not already open
+ my_f = f if self.data_files[i_scalar].filename == filename \
+ else h5py.File(self.data_files[i_scalar].filename, "r")
+
+ for field in fields:
+ data[field][target] = \
+ my_f[os.path.join(ptype, field)].value[scalar_indices[target]]
+
+ if self.data_files[i_scalar].filename != filename: my_f.close()
+
+ return data
+
class GadgetFOFHaloDataset(Dataset):
_index_class = GadgetFOFHaloParticleIndex
_file_class = GadgetFOFHDF5File
@@ -424,14 +460,38 @@
all_id_start = self.index._group_length_sum[:i_scalar].sum()
with h5py.File(self.scalar_data_file.filename, "r") as f:
halo_size = f[ptype]["%sLen" % ptype].value
- all_id_start += int(halo_size[:self.scalar_index].sum())
+ all_id_start += np.int64(halo_size[:self.scalar_index].sum())
self.particle_number = halo_size[self.scalar_index]
- # If a subhalo, find the index of the parent
+ # If a subhalo, find the index of the parent.
if ptype == "Subhalo":
self.group_identifier = \
f["Subhalo/SubhaloGrNr"][self.scalar_index]
+ # Find the file that has the scalar values for the parent.
+ ip_scalar = np.digitize([self.group_identifier],
+ self.index._halo_index_start["Group"],
+ right=False)[0] - 1
+
+ p_data = self.index._get_halo_values(
+ "Group", np.array([self.group_identifier]),
+ ["GroupNsubs", "GroupFirstSub"], f=f)
+ self.subgroup_identifier = self.particle_identifier - \
+ np.int64(p_data["GroupFirstSub"][0])
+ parent_subhalos = p_data["GroupNsubs"][0]
+
+ mylog.debug("Subhalo %d is subgroup %s of %d in group %d." % \
+ (self.particle_identifier, self.subgroup_identifier,
+ parent_subhalos, self.group_identifier))
+
+ sub_ids = np.arange(self.particle_identifier -
+ self.subgroup_identifier,
+ self.particle_identifier)
+ sub_data = self.index._get_halo_values(
+ "Subhalo", sub_ids, ["SubhaloLen"], f=f)
+ id_offset = np.int64(sub_data["SubhaloLen"].sum())
+ all_id_start += id_offset
+
i_start = np.digitize([all_id_start],
self.index._halo_id_start,
right=False)[0] - 1
@@ -442,11 +502,13 @@
self.field_data_start = \
(all_id_start -
self.index._halo_id_start[i_start:i_end+1]).clip(min=0)
+ self.field_data_start = self.field_data_start.astype(np.int64)
self.field_data_end = \
(all_id_start + self.particle_number -
self.index._halo_id_start[i_start:i_end+1]).clip(
max=self.index._halo_id_end[i_start:i_end+1])
- self.all_id_start = all_id_start
+ self.field_data_end = self.field_data_end.astype(np.int64)
+ self.all_id_start = np.int64(all_id_start)
for attr in ["mass", "position", "velocity"]:
setattr(self, attr, self[self.ptype, "particle_%s" % attr][0])
diff -r 61faeef46f5da3ad42608d8994b74bd19b9a128f -r 27e07752d7dd2dc6bd95bd44a2fa56beb5217bd5 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -311,7 +311,6 @@
scalar_fields.extend(my_fields)
if "IDs" not in f: continue
- if ptype == "Subhalo": continue
fields.extend([(ptype, field) for field in f["IDs"]])
return fields, scalar_fields, {}
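
_get_halo_values buckets the requested halo ids by the file that stores them,
so each file is opened at most once. A condensed sketch, assuming
read_file(i_file, local_indices) returns the stored values:

import numpy as np

def gather_scalars(identifiers, index_start, read_file):
    identifiers = np.asarray(identifiers)
    out = np.empty(identifiers.size)
    # which file holds each id, given per-file starting ids
    i_files = np.digitize(identifiers, index_start, right=False) - 1
    for i_file in np.unique(i_files):
        target = i_files == i_file
        local = identifiers[target] - index_start[i_file]
        out[target] = read_file(i_file, local)
    return out
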
https://bitbucket.org/yt_analysis/yt/commits/43956a1667d3/
Changeset: 43956a1667d3
Branch: yt
User: brittonsmith
Date: 2015-12-29 17:10:36+00:00
Summary: Changing defaultdict to use a lambda.
Affected #: 1 file
diff -r 27e07752d7dd2dc6bd95bd44a2fa56beb5217bd5 -r 43956a1667d3ef3ba7df3d8520c54571dd44f433 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -250,9 +250,8 @@
return all_data
def _read_member_fields(self, dobj, member_fields):
- def field_array():
- return np.empty(dobj.particle_number, dtype=np.float64)
- all_data = defaultdict(field_array)
+ all_data = defaultdict(lambda: np.empty(dobj.particle_number,
+ dtype=np.float64))
if not member_fields: return all_data
field_start = 0
for i, data_file in enumerate(dobj.field_data_files):
https://bitbucket.org/yt_analysis/yt/commits/fc1809ef70df/
Changeset: fc1809ef70df
Branch: yt
User: brittonsmith
Date: 2015-12-30 22:00:21+00:00
Summary: Fixing subhalo particle access to work correctly and modularizing a bit.
Affected #: 1 file
diff -r 43956a1667d3ef3ba7df3d8520c54571dd44f433 -r fc1809ef70dfd5e554944eac0f54c7281433dbf9 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -355,6 +355,15 @@
dobj, fields_to_read)
return fields_to_return, fields_to_generate
+ def _get_halo_file_indices(self, ptype, identifiers):
+ return np.digitize(identifiers,
+ self._halo_index_start[ptype], right=False) - 1
+
+ def _get_halo_scalar_index(self, ptype, identifier):
+ i_scalar = self._get_halo_file_indices(ptype, [identifier])[0]
+ scalar_index = identifier - self._halo_index_start[ptype][i_scalar]
+ return scalar_index
+
def _get_halo_values(self, ptype, identifiers, fields,
f=None):
"""
@@ -372,8 +381,7 @@
else f.filename
data = defaultdict(lambda: np.empty(identifiers.size))
- i_scalars = np.digitize(identifiers,
- self._halo_index_start[ptype], right=False) - 1
+ i_scalars = self._get_halo_file_indices(ptype, identifiers)
for i_scalar in np.unique(i_scalars):
target = i_scalars == i_scalar
scalar_indices = identifiers - \
@@ -447,51 +455,72 @@
self.index.particle_count[ptype], ptype))
# Find the file that has the scalar values for this halo.
- i_scalar = np.digitize([particle_identifier],
- self.index._halo_index_start[ptype],
- right=False)[0] - 1
+ i_scalar = self.index._get_halo_file_indices(
+ ptype, [particle_identifier])[0]
+ self.scalar_data_file = self.index.data_files[i_scalar]
# index within halo arrays that corresponds to this halo
- self.scalar_index = self.particle_identifier - \
- self.index._halo_index_start[ptype][i_scalar]
- self.scalar_data_file = self.index.data_files[i_scalar]
+ self.scalar_index = self.index._get_halo_scalar_index(
+ ptype, self.particle_identifier)
- # Find the files that have the member particles for this halo.
- all_id_start = self.index._group_length_sum[:i_scalar].sum()
- with h5py.File(self.scalar_data_file.filename, "r") as f:
- halo_size = f[ptype]["%sLen" % ptype].value
- all_id_start += np.int64(halo_size[:self.scalar_index].sum())
- self.particle_number = halo_size[self.scalar_index]
+ my_data = self.index._get_halo_values(
+ ptype, np.array([particle_identifier]),
+ ["%sLen" % ptype])
+ self.particle_number = my_data["%sLen" % ptype]
- # If a subhalo, find the index of the parent.
- if ptype == "Subhalo":
- self.group_identifier = \
- f["Subhalo/SubhaloGrNr"][self.scalar_index]
+ if ptype == "Group":
+ self.group_identifier = self.particle_identifier
+ id_offset = 0
+ # index of file that has scalar values for the group
+ g_scalar = i_scalar
+ group_index = self.scalar_index
- # Find the file that has the scalar values for the parent.
- ip_scalar = np.digitize([self.group_identifier],
- self.index._halo_index_start["Group"],
- right=False)[0] - 1
+ # If a subhalo, find the index of the parent.
+ elif ptype == "Subhalo":
+ my_data = self.index._get_halo_values(
+ "Subhalo", np.array([particle_identifier]),
+ ["SubhaloGrNr"])
+ self.group_identifier = np.int64(my_data["SubhaloGrNr"])
- p_data = self.index._get_halo_values(
- "Group", np.array([self.group_identifier]),
- ["GroupNsubs", "GroupFirstSub"], f=f)
- self.subgroup_identifier = self.particle_identifier - \
- np.int64(p_data["GroupFirstSub"][0])
- parent_subhalos = p_data["GroupNsubs"][0]
+ # Find the file that has the scalar values for the parent group.
+ g_scalar = self.index._get_halo_file_indices(
+ "Group", [self.group_identifier])[0]
- mylog.debug("Subhalo %d is subgroup %s of %d in group %d." % \
- (self.particle_identifier, self.subgroup_identifier,
- parent_subhalos, self.group_identifier))
+ # index within halo arrays that corresponds to the parent group
+ group_index = self.index._get_halo_scalar_index(
+ "Group", self.group_identifier)
- sub_ids = np.arange(self.particle_identifier -
- self.subgroup_identifier,
- self.particle_identifier)
- sub_data = self.index._get_halo_values(
- "Subhalo", sub_ids, ["SubhaloLen"], f=f)
- id_offset = np.int64(sub_data["SubhaloLen"].sum())
- all_id_start += id_offset
+ my_data = self.index._get_halo_values(
+ "Group", np.array([self.group_identifier]),
+ ["GroupNsubs", "GroupFirstSub"])
+ self.subgroup_identifier = self.particle_identifier - \
+ np.int64(my_data["GroupFirstSub"][0])
+ parent_subhalos = my_data["GroupNsubs"][0]
+ mylog.debug("Subhalo %d is subgroup %s of %d in group %d." % \
+ (self.particle_identifier, self.subgroup_identifier,
+ parent_subhalos, self.group_identifier))
+
+ # ids of the sibling subhalos that come before this one
+ sub_ids = np.arange(
+ self.particle_identifier - self.subgroup_identifier,
+ self.particle_identifier)
+ my_data = self.index._get_halo_values(
+ "Subhalo", sub_ids, ["SubhaloLen"])
+ id_offset = my_data["SubhaloLen"].sum(dtype=np.int64)
+
+ # Calculate the starting index for the member particles.
+ # First, add up all the particles in the earlier files.
+ all_id_start = self.index._group_length_sum[:g_scalar].sum(dtype=np.int64)
+
+ # Now add the halos in this file that come before.
+ with h5py.File(self.index.data_files[g_scalar].filename, "r") as f:
+ all_id_start += f["Group"]["GroupLen"][:group_index].sum(dtype=np.int64)
+
+ # Add the subhalo offset.
+ all_id_start += id_offset
+
+ # indices of first and last files containing member particles
i_start = np.digitize([all_id_start],
self.index._halo_id_start,
right=False)[0] - 1
@@ -499,6 +528,8 @@
self.index._halo_id_end,
right=True)[0]
self.field_data_files = self.index.data_files[i_start:i_end+1]
+
+ # starting and ending indices for each file containing particles
self.field_data_start = \
(all_id_start -
self.index._halo_id_start[i_start:i_end+1]).clip(min=0)
@@ -508,7 +539,6 @@
self.index._halo_id_start[i_start:i_end+1]).clip(
max=self.index._halo_id_end[i_start:i_end+1])
self.field_data_end = self.field_data_end.astype(np.int64)
- self.all_id_start = np.int64(all_id_start)
for attr in ["mass", "position", "velocity"]:
setattr(self, attr, self[self.ptype, "particle_%s" % attr][0])
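
The starting index of a halo's member particles is now assembled from three
precomputed pieces. A sketch of the arithmetic, with all arguments as
hypothetical arrays:

import numpy as np

def member_id_start(group_length_sums, group_lens_in_file,
                    group_index, sibling_subhalo_lens):
    # (1) all member particles in earlier files
    start = group_length_sums.sum(dtype=np.int64)
    # (2) earlier groups within the same file
    start += group_lens_in_file[:group_index].sum(dtype=np.int64)
    # (3) for subhalos: earlier sibling subhalos of the same group
    start += sibling_subhalo_lens.sum(dtype=np.int64)
    return start
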
https://bitbucket.org/yt_analysis/yt/commits/622968405e97/
Changeset: 622968405e97
Branch: yt
User: brittonsmith
Date: 2015-12-30 22:09:11+00:00
Summary: Eliminating one file open.
Affected #: 1 file
diff -r fc1809ef70dfd5e554944eac0f54c7281433dbf9 -r 622968405e97dbdc1a5c9c17f047c05bc7bfa86d yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -463,9 +463,11 @@
self.scalar_index = self.index._get_halo_scalar_index(
ptype, self.particle_identifier)
+ halo_fields = ["%sLen" % ptype]
+ if ptype == "Subhalo": halo_fields.append("SubhaloGrNr")
my_data = self.index._get_halo_values(
ptype, np.array([particle_identifier]),
- ["%sLen" % ptype])
+ halo_fields)
self.particle_number = my_data["%sLen" % ptype]
if ptype == "Group":
@@ -477,9 +479,6 @@
# If a subhalo, find the index of the parent.
elif ptype == "Subhalo":
- my_data = self.index._get_halo_values(
- "Subhalo", np.array([particle_identifier]),
- ["SubhaloGrNr"])
self.group_identifier = np.int64(my_data["SubhaloGrNr"])
# Find the file that has the scalar values for the parent group.
https://bitbucket.org/yt_analysis/yt/commits/def7bec4facb/
Changeset: def7bec4facb
Branch: yt
User: brittonsmith
Date: 2015-12-30 22:39:53+00:00
Summary: Adding support for subhalo ids given as tuples.
Affected #: 1 file
diff -r 622968405e97dbdc1a5c9c17f047c05bc7bfa86d -r def7bec4facbfc281f1b30ad0f7c52789a8f8296 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -446,9 +446,19 @@
self.ptype = ptype
self._current_particle_type = ptype
- self.particle_identifier = particle_identifier
super(GagdetFOFHaloContainer, self).__init__(ds, {})
+ if ptype == "Subhalo" and isinstance(particle_identifier, tuple):
+ self.group_identifier, self.subgroup_identifier = \
+ particle_identifier
+ my_data = self.index._get_halo_values(
+ "Group", np.array([self.group_identifier]),
+ ["GroupFirstSub"])
+ self.particle_identifier = \
+ np.int64(my_data["GroupFirstSub"] + self.subgroup_identifier)
+ else:
+ self.particle_identifier = particle_identifier
+
if self.particle_identifier >= self.index.particle_count[ptype]:
raise RuntimeError("%s %d requested, but only %d %s objects exist." %
(ptype, particle_identifier,
@@ -456,7 +466,7 @@
# Find the file that has the scalar values for this halo.
i_scalar = self.index._get_halo_file_indices(
- ptype, [particle_identifier])[0]
+ ptype, [self.particle_identifier])[0]
self.scalar_data_file = self.index.data_files[i_scalar]
# index within halo arrays that corresponds to this halo
@@ -466,7 +476,7 @@
halo_fields = ["%sLen" % ptype]
if ptype == "Subhalo": halo_fields.append("SubhaloGrNr")
my_data = self.index._get_halo_values(
- ptype, np.array([particle_identifier]),
+ ptype, np.array([self.particle_identifier]),
halo_fields)
self.particle_number = my_data["%sLen" % ptype]
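
A (group, subgroup) tuple resolves to an absolute subhalo id through the
group's GroupFirstSub value. A sketch, with group_first_sub as a hypothetical
array of each group's first subhalo id:

import numpy as np

def absolute_subhalo_id(group_first_sub, group_id, subgroup_id):
    return np.int64(group_first_sub[group_id] + subgroup_id)

# e.g. ds.halo("Subhalo", (1, 4)) resolves to group_first_sub[1] + 4
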
https://bitbucket.org/yt_analysis/yt/commits/a48c02054b27/
Changeset: a48c02054b27
Branch: yt
User: brittonsmith
Date: 2015-12-30 23:59:12+00:00
Summary: Fixing bug in calculation of id arrays.
Affected #: 1 file
diff -r def7bec4facbfc281f1b30ad0f7c52789a8f8296 -r a48c02054b27611748aa6a2cf92a11c34d71020c yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -305,11 +305,10 @@
itself.
"""
- all_ids = np.array([data_file.total_ids
+ self._halo_id_number = np.array([data_file.total_ids
for data_file in self.data_files])
-
- self._halo_id_end = all_ids.cumsum()
- self._halo_id_start = self._halo_id_end - all_ids
+ self._halo_id_end = self._halo_id_number.cumsum()
+ self._halo_id_start = self._halo_id_end - self._halo_id_number
self._group_length_sum = \
np.array([data_file.group_length_sum
@@ -455,7 +454,7 @@
"Group", np.array([self.group_identifier]),
["GroupFirstSub"])
self.particle_identifier = \
- np.int64(my_data["GroupFirstSub"] + self.subgroup_identifier)
+ np.int64(my_data["GroupFirstSub"][0] + self.subgroup_identifier)
else:
self.particle_identifier = particle_identifier
@@ -478,7 +477,7 @@
my_data = self.index._get_halo_values(
ptype, np.array([self.particle_identifier]),
halo_fields)
- self.particle_number = my_data["%sLen" % ptype]
+ self.particle_number = np.int64(my_data["%sLen" % ptype][0])
if ptype == "Group":
self.group_identifier = self.particle_identifier
@@ -489,7 +488,7 @@
# If a subhalo, find the index of the parent.
elif ptype == "Subhalo":
- self.group_identifier = np.int64(my_data["SubhaloGrNr"])
+ self.group_identifier = np.int64(my_data["SubhaloGrNr"][0])
# Find the file that has the scalar values for the parent group.
g_scalar = self.index._get_halo_file_indices(
@@ -546,7 +545,7 @@
self.field_data_end = \
(all_id_start + self.particle_number -
self.index._halo_id_start[i_start:i_end+1]).clip(
- max=self.index._halo_id_end[i_start:i_end+1])
+ max=self.index._halo_id_number[i_start:i_end+1])
self.field_data_end = self.field_data_end.astype(np.int64)
for attr in ["mass", "position", "velocity"]:
https://bitbucket.org/yt_analysis/yt/commits/e6858cc23b74/
Changeset: e6858cc23b74
Branch: yt
User: brittonsmith
Date: 2015-12-31 14:21:01+00:00
Summary: Adding docstring.
Affected #: 1 file
diff -r a48c02054b27611748aa6a2cf92a11c34d71020c -r e6858cc23b749930e6eddbcdc3e5ee501fe23945 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -152,6 +152,14 @@
super(GadgetFOFDataset, self).__init__(filename, dataset_type,
units_override=units_override)
+ @property
+ def halos_field_list(self):
+ return self._halos_ds.field_list
+
+ @property
+ def halos_derived_field_list(self):
+ return self._halos_ds.derived_field_list
+
_instantiated_halo_ds = None
@property
def _halos_ds(self):
@@ -161,7 +169,7 @@
def _setup_classes(self):
super(GadgetFOFDataset, self)._setup_classes()
- self.halo = partial(GagdetFOFHaloContainer, self._halos_ds)
+ self.halo = partial(GagdetFOFHaloContainer, ds=self._halos_ds)
def _parse_parameter_file(self):
with h5py.File(self.parameter_filename,"r") as f:
@@ -436,9 +444,83 @@
self.objects = []
class GagdetFOFHaloContainer(YTSelectionContainer):
+ """
+ Create a data container to get member particles and individual
+ values from halos and subhalos. Halo mass, position, and
+ velocity are set as attributes. Halo IDs are accessible
+ through the field, "member_ids". Other fields that are one
+ value per halo are accessible as normal. The field list for
+ halo objects can be seen in `ds.halos_field_list`.
+
+ Parameters
+ ----------
+ ptype : string
+ The type of halo, either "Group" for the main halo or
+ "Subhalo" for subhalos.
+ particle_identifier : int or tuple of (int, int)
+ The halo or subhalo id. If requesting a subhalo, the id
+ can also be given as a tuple of the main halo id and
+ subgroup id, such as (1, 4) for subgroup 4 of halo 1.
+
+ Halo Container Attributes
+ -------------------------
+ particle_identifier : int
+ The id of the halo or subhalo.
+ group_identifier : int
+ For subhalos, the id of the enclosing halo.
+ subgroup_identifier : int
+ For subhalos, the relative id of the subhalo within
+ the enclosing halo.
+ particle_number : int
+ Number of particles in the halo.
+ mass : float
+ Halo mass.
+ position : array of floats
+ Halo position.
+ velocity : array of floats
+ Halo velocity.
+
+ Relevant Fields
+ ---------------
+ particle_number :
+ number of particles
+ subhalo_number :
+ number of subhalos
+ group_identifier :
+ id of parent group for subhalos
+
+ Examples
+ --------
+
+ >>> import yt
+ >>> ds = yt.load("gadget_halos/data/groups_298/fof_subhalo_tab_298.0.hdf5")
+ >>>
+ >>> halo = ds.halo("Group", 0)
+ >>> print halo.mass
+ 13256.5517578 code_mass
+ >>> print halo.position
+ [ 16.18603706 6.95965052 12.52694607] code_length
+ >>> print halo.velocity
+ [ 6943694.22793569 -762788.90647454 -794749.63819757] cm/s
+ >>> print halo["Group_R_Crit200"]
+ [ 0.79668683] code_length
+ >>>
+ >>> # particle ids for this halo
+ >>> print halo["member_ids"]
+ >>>
+ >>> # get the first subhalo of this halo
+ >>> subhalo = ds.halo("Subhalo", (0, 0))
+ [ 723631. 690744. 854212. ..., 608589. 905551. 1147449.] dimensionless
+ >>> print subhalo["member_ids"]
+ [ 723631. 690744. 854212. ..., 808362. 956359. 1248821.] dimensionless
+
+ """
+
+ _type_name = "halo"
+ _con_args = ("ptype", "particle_identifier")
_spatial = False
- def __init__(self, ds, ptype, particle_identifier):
+ def __init__(self, ptype, particle_identifier, ds=None):
if ptype not in ds.particle_types_raw:
raise RuntimeError("Possible halo types are %s, supplied \"%s\"." %
(ds.particle_types_raw, ptype))
https://bitbucket.org/yt_analysis/yt/commits/f97a37a5559a/
Changeset: f97a37a5559a
Branch: yt
User: brittonsmith
Date: 2015-12-31 16:06:06+00:00
Summary: Adding docs for halo containers.
Affected #: 1 file
diff -r e6858cc23b749930e6eddbcdc3e5ee501fe23945 -r f97a37a5559a394483938214458bbc95eb6db287 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1232,10 +1232,9 @@
yt has support for reading halo catalogs produced by Rockstar and the inline
FOF/SUBFIND halo finders of Gadget and OWLS. The halo catalogs are treated as
-particle datasets where each particle represents a single halo. At this time,
-yt does not have the ability to load the member particles for a given halo.
-However, once loaded, further halo analysis can be performed using
-:ref:`halo_catalog`.
+particle datasets where each particle represents a single halo. Member particles
+for individual halos can be accessed through halo data containers. Further halo
+analysis can be performed using :ref:`halo_catalog`.
In the case where halo catalogs are written to multiple files, one must only
give the path to one of them.
@@ -1269,11 +1268,39 @@
# x component of the spin
print ad["Subhalo", "SubhaloSpin_0"]
+Halo member particles are accessed by creating halo data containers with the
+type of halo ("Group" or "Subhalo") and the halo id. Scalar values for halos
+can be accessed in the same way. Halos also have mass, position, and velocity
+attributes.
+
+.. code-block:: python
+
+ halo = ds.halo("Group", 0)
+ # member particles for this halo
+ print halo["member_ids"]
+ # halo virial radius
+ print halo["Group_R_Crit200"]
+ # halo mass
+ print halo.mass
+
+Subhalo containers can be created using either their absolute ids or
+tuples of the parent group id and the relative subgroup id.
+
+.. code-block:: python
+
+ # first subhalo of the first halo
+ subhalo = ds.halo("Subhalo", (0, 0))
+ # this subhalo's absolute id
+ print subhalo.group_identifier
+ # member particles
+ print subhalo["member_ids"]
+
OWLS FOF/SUBFIND
^^^^^^^^^^^^^^^^
OWLS halo catalogs have a very similar structure to regular Gadget halo catalogs.
-The two field types are "FOF" and "SUBFIND".
+The two field types are "FOF" and "SUBFIND". At this time, halo member particles
+cannot be loaded.
.. code-block:: python
https://bitbucket.org/yt_analysis/yt/commits/a3cb8d50b86c/
Changeset: a3cb8d50b86c
Branch: yt
User: brittonsmith
Date: 2015-12-31 16:10:31+00:00
Summary: Adding imports.
Affected #: 1 file
diff -r f97a37a5559a394483938214458bbc95eb6db287 -r a3cb8d50b86cfd6528bc72eeac57af44f64c1ceb yt/frontends/gadget_fof/api.py
--- a/yt/frontends/gadget_fof/api.py
+++ b/yt/frontends/gadget_fof/api.py
@@ -15,12 +15,19 @@
#-----------------------------------------------------------------------------
from .data_structures import \
- GadgetFOFDataset
+ GadgetFOFParticleIndex, \
+ GadgetFOFHDF5File, \
+ GadgetFOFDataset, \
+ GadgetFOFHaloParticleIndex, \
+ GadgetFOFHaloDataset, \
+ GagdetFOFHaloContainer
from .io import \
- IOHandlerGadgetFOFHDF5
+ IOHandlerGadgetFOFHDF5, \
+ IOHandlerGadgetFOFHaloHDF5
from .fields import \
- GadgetFOFFieldInfo
+ GadgetFOFFieldInfo, \
+ GadgetFOFHaloFieldInfo
from . import tests
https://bitbucket.org/yt_analysis/yt/commits/dd5521ccd010/
Changeset: dd5521ccd010
Branch: yt
User: brittonsmith
Date: 2015-12-31 16:20:08+00:00
Summary: Adding a test.
Affected #: 1 file
diff -r a3cb8d50b86cfd6528bc72eeac57af44f64c1ceb -r dd5521ccd0109ef629f6a2f4bcc0fb1490b6cc00 yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -51,3 +51,23 @@
@requires_file(g42)
def test_GadgetFOFDataset():
assert isinstance(data_dir_load(g42), GadgetFOFDataset)
+
+# fof/subhalo catalog with member particles
+g298 = "gadget_halos/data/groups_298/fof_subhalo_tab_298.0.hdf5"
+
+@requires_ds(g298)
+def test_subhalos():
+ ds = data_dir_load(g298)
+ total_sub = 0
+ total_int = 0
+ for hid in range(0, ds.index.particle_count["Group"]):
+ my_h = ds.halo("Group", hid)
+ h_ids = my_h["ID"]
+ for sid in range(my_h["subhalo_number"]):
+ my_s = ds.halo("Subhalo", (my_h.particle_identifier, sid))
+ total_sub += my_s["ID"].size
+ total_int += np.intersect1d(h_ids, my_s["ID"]).size
+
+ # Test that all subhalo particles are contained within
+ # their parent group.
+ yield assert_equal, total_sub, total_int
https://bitbucket.org/yt_analysis/yt/commits/2c04e33a17d1/
Changeset: 2c04e33a17d1
Branch: yt
User: brittonsmith
Date: 2015-12-31 16:29:00+00:00
Summary: Fixing order of output in docstring.
Affected #: 1 file
diff -r dd5521ccd0109ef629f6a2f4bcc0fb1490b6cc00 -r 2c04e33a17d14be903bcab32c21fe91dccf0e5a3 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -507,10 +507,10 @@
>>>
>>> # particle ids for this halo
>>> print halo["member_ids"]
+ [ 723631. 690744. 854212. ..., 608589. 905551. 1147449.] dimensionless
>>>
>>> # get the first subhalo of this halo
>>> subhalo = ds.halo("Subhalo", (0, 0))
- [ 723631. 690744. 854212. ..., 608589. 905551. 1147449.] dimensionless
>>> print subhalo["member_ids"]
[ 723631. 690744. 854212. ..., 808362. 956359. 1248821.] dimensionless
https://bitbucket.org/yt_analysis/yt/commits/2e816e9a3e6a/
Changeset: 2e816e9a3e6a
Branch: yt
User: brittonsmith
Date: 2015-12-31 21:08:56+00:00
Summary: Fixing some flake8 errors.
Affected #: 3 files
diff -r 2c04e33a17d14be903bcab32c21fe91dccf0e5a3 -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -32,14 +32,10 @@
from yt.frontends.gadget_fof.fields import \
GadgetFOFFieldInfo, \
GadgetFOFHaloFieldInfo
-from yt.geometry.geometry_handler import \
- Index
from yt.geometry.particle_geometry_handler import \
ParticleIndex
from yt.utilities.cosmology import \
Cosmology
-from yt.utilities.exceptions import \
- YTException
from yt.utilities.logger import ytLogger as \
mylog
diff -r 2c04e33a17d14be903bcab32c21fe91dccf0e5a3 -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -257,6 +257,7 @@
for i, data_file in enumerate(dobj.field_data_files):
start_index = dobj.field_data_start[i]
end_index = dobj.field_data_end[i]
+ pcount = end_index - start_index
field_end = field_start + end_index - start_index
with h5py.File(data_file.filename, "r") as f:
for ptype, field_list in sorted(member_fields.items()):
diff -r 2c04e33a17d14be903bcab32c21fe91dccf0e5a3 -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -61,12 +61,12 @@
total_sub = 0
total_int = 0
for hid in range(0, ds.index.particle_count["Group"]):
- my_h = ds.halo("Group", hid)
- h_ids = my_h["ID"]
- for sid in range(my_h["subhalo_number"]):
- my_s = ds.halo("Subhalo", (my_h.particle_identifier, sid))
- total_sub += my_s["ID"].size
- total_int += np.intersect1d(h_ids, my_s["ID"]).size
+ my_h = ds.halo("Group", hid)
+ h_ids = my_h["ID"]
+ for sid in range(my_h["subhalo_number"]):
+ my_s = ds.halo("Subhalo", (my_h.particle_identifier, sid))
+ total_sub += my_s["ID"].size
+ total_int += np.intersect1d(h_ids, my_s["ID"]).size
# Test that all subhalo particles are contained within
# their parent group.
https://bitbucket.org/yt_analysis/yt/commits/baf1cf061b74/
Changeset: baf1cf061b74
Branch: yt
User: brittonsmith
Date: 2015-12-31 21:11:54+00:00
Summary: Merging.
Affected #: 37 files
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 tests/nose_runner.py
--- /dev/null
+++ b/tests/nose_runner.py
@@ -0,0 +1,54 @@
+import sys
+import os
+import yaml
+import multiprocessing as mp
+import nose
+import glob
+from contextlib import closing
+from yt.config import ytcfg
+from yt.utilities.answer_testing.framework import AnswerTesting
+
+
+def run_job(argv):
+ with closing(open(str(os.getpid()) + ".out", "w")) as fstderr:
+ cur_stderr = sys.stderr
+ sys.stderr = fstderr
+ answer = argv[0]
+ test_dir = ytcfg.get("yt", "test_data_dir")
+ answers_dir = os.path.join(test_dir, "answers")
+ if not os.path.isdir(os.path.join(answers_dir, answer)):
+ nose.run(argv=argv + ['--answer-store'],
+ addplugins=[AnswerTesting()], exit=False)
+ nose.run(argv=argv, addplugins=[AnswerTesting()], exit=False)
+ sys.stderr = cur_stderr
+
+if __name__ == "__main__":
+ test_dir = ytcfg.get("yt", "test_data_dir")
+ answers_dir = os.path.join(test_dir, "answers")
+ with open('tests/tests_%i.%i.yaml' % sys.version_info[:2], 'r') as obj:
+ tests = yaml.load(obj)
+
+ base_argv = ['--local-dir=%s' % answers_dir, '-v', '-s', '--nologcapture',
+ '--with-answer-testing', '--answer-big-data', '--local']
+ args = [['unittests', '-v', '-s', '--nologcapture']]
+ for answer in list(tests.keys()):
+ argv = [answer]
+ argv += base_argv
+ argv.append('--xunit-file=%s.xml' % answer)
+ argv.append('--answer-name=%s' % answer)
+ argv += tests[answer]
+ args.append(argv)
+
+ processes = [mp.Process(target=run_job, args=(args[i],))
+ for i in range(len(args))]
+ for p in processes:
+ p.start()
+ for p in processes:
+ p.join(timeout=7200)
+ if p.is_alive():
+ p.terminate()
+ p.join(timeout=30)
+ for fname in glob.glob("*.out"):
+ with open(fname, 'r') as fin:
+ print(fin.read())
+ os.remove(fname)
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 tests/tests_2.7.yaml
--- /dev/null
+++ b/tests/tests_2.7.yaml
@@ -0,0 +1,51 @@
+local_artio_270:
+ - yt/frontends/artio/tests/test_outputs.py
+
+local_athena_270:
+ - yt/frontends/athena
+
+local_chombo_270:
+ - yt/frontends/chombo/tests/test_outputs.py
+
+local_enzo_270:
+ - yt/frontends/enzo
+
+local_fits_270:
+ - yt/frontends/fits/tests/test_outputs.py
+
+local_flash_270:
+ - yt/frontends/flash/tests/test_outputs.py
+
+local_gadget_270:
+ - yt/frontends/gadget/tests/test_outputs.py
+
+local_halos_270:
+ - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
+ - yt/analysis_modules/halo_finding/tests/test_rockstar.py
+ - yt/frontends/owls_subfind/tests/test_outputs.py
+
+local_owls_270:
+ - yt/frontends/owls/tests/test_outputs.py
+
+local_pw_270:
+ - yt/visualization/tests/test_plotwindow.py:test_attributes
+ - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+
+local_tipsy_270:
+ - yt/frontends/tipsy/tests/test_outputs.py
+
+local_varia_270:
+ - yt/analysis_modules/radmc3d_export
+ - yt/frontends/moab/tests/test_c5.py
+ - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+ - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+ - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+
+local_orion_270:
+ - yt/frontends/boxlib/tests/test_orion.py
+
+local_ramses_270:
+ - yt/frontends/ramses/tests/test_outputs.py
+
+local_ytdata_270:
+ - yt/frontends/ytdata
\ No newline at end of file
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 tests/tests_3.4.yaml
--- /dev/null
+++ b/tests/tests_3.4.yaml
@@ -0,0 +1,49 @@
+local_artio_340:
+ - yt/frontends/artio/tests/test_outputs.py
+
+local_athena_340:
+ - yt/frontends/athena
+
+local_chombo_340:
+ - yt/frontends/chombo/tests/test_outputs.py
+
+local_enzo_340:
+ - yt/frontends/enzo
+
+local_fits_340:
+ - yt/frontends/fits/tests/test_outputs.py
+
+local_flash_340:
+ - yt/frontends/flash/tests/test_outputs.py
+
+local_gadget_340:
+ - yt/frontends/gadget/tests/test_outputs.py
+
+local_halos_340:
+ - yt/frontends/owls_subfind/tests/test_outputs.py
+
+local_owls_340:
+ - yt/frontends/owls/tests/test_outputs.py
+
+local_pw_340:
+ - yt/visualization/tests/test_plotwindow.py:test_attributes
+ - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+
+local_tipsy_340:
+ - yt/frontends/tipsy/tests/test_outputs.py
+
+local_varia_340:
+ - yt/analysis_modules/radmc3d_export
+ - yt/frontends/moab/tests/test_c5.py
+ - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+ - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+ - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+
+local_orion_340:
+ - yt/frontends/boxlib/tests/test_orion.py
+
+local_ramses_340:
+ - yt/frontends/ramses/tests/test_outputs.py
+
+local_ytdata_340:
+ - yt/frontends/ytdata
\ No newline at end of file
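Each top-level key in these YAML files names one answer suite; tests/nose_runner.py above turns every entry into a separate nose invocation run in its own process. A minimal sketch of that mapping for a single entry, assuming a hypothetical answers directory (in practice derived from ytcfg's test_data_dir):

    answer = "local_enzo_340"
    paths = ["yt/frontends/enzo"]          # the YAML values for this key
    answers_dir = "/data/yt_test/answers"  # assumed; read from ytcfg in practice

    base_argv = ['--local-dir=%s' % answers_dir, '-v', '-s', '--nologcapture',
                 '--with-answer-testing', '--answer-big-data', '--local']
    argv = [answer] + base_argv
    argv.append('--xunit-file=%s.xml' % answer)
    argv.append('--answer-name=%s' % answer)
    argv += paths
    # argv is what run_job() hands to nose.run() in its own process.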
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -47,6 +47,12 @@
"y":("z","x"),
"z":("x","y")}
+def force_unicode(value):
+ if hasattr(value, 'decode'):
+ return value.decode('utf8')
+ else:
+ return value
+
def parse_value(value, default_units):
if isinstance(value, YTQuantity):
return value.in_units(default_units)
@@ -919,27 +925,28 @@
p = f["/parameters"]
parameters["ExposureTime"] = YTQuantity(p["exp_time"].value, "s")
- if isinstance(p["area"].value, (string_types, bytes)):
- parameters["Area"] = p["area"].value.decode("utf8")
+ area = force_unicode(p['area'].value)
+ if isinstance(area, string_types):
+ parameters["Area"] = area
else:
- parameters["Area"] = YTQuantity(p["area"].value, "cm**2")
+ parameters["Area"] = YTQuantity(area, "cm**2")
parameters["Redshift"] = p["redshift"].value
parameters["AngularDiameterDistance"] = YTQuantity(p["d_a"].value, "Mpc")
parameters["sky_center"] = YTArray(p["sky_center"][:], "deg")
parameters["dtheta"] = YTQuantity(p["dtheta"].value, "deg")
parameters["pix_center"] = p["pix_center"][:]
if "rmf" in p:
- parameters["RMF"] = p["rmf"].value.decode("utf8")
+ parameters["RMF"] = force_unicode(p["rmf"].value)
if "arf" in p:
- parameters["ARF"] = p["arf"].value.decode("utf8")
+ parameters["ARF"] = force_unicode(p["arf"].value)
if "channel_type" in p:
- parameters["ChannelType"] = p["channel_type"].value.decode("utf8")
+ parameters["ChannelType"] = force_unicode(p["channel_type"].value)
if "mission" in p:
- parameters["Mission"] = p["mission"].value.decode("utf8")
+ parameters["Mission"] = force_unicode(p["mission"].value)
if "telescope" in p:
- parameters["Telescope"] = p["telescope"].value.decode("utf8")
+ parameters["Telescope"] = force_unicode(p["telescope"].value)
if "instrument" in p:
- parameters["Instrument"] = p["instrument"].value.decode("utf8")
+ parameters["Instrument"] = force_unicode(p["instrument"].value)
d = f["/data"]
events["xpix"] = d["xpix"][:]
@@ -1552,4 +1559,4 @@
d.create_dataset(key, data=f_in[key].value)
f_in.close()
- f_out.close()
\ No newline at end of file
+ f_out.close()
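The force_unicode helper above is a small Python 2/3 shim: anything with a decode method (bytes under Python 3, byte strings under Python 2) is decoded to text, and everything else passes through untouched. A quick sketch of the expected behavior, using an illustrative instrument name:

    def force_unicode(value):
        # Same helper as introduced in the diff above.
        if hasattr(value, 'decode'):
            return value.decode('utf8')
        else:
            return value

    print(force_unicode(b"ACIS-S"))  # bytes -> u'ACIS-S'
    print(force_unicode(u"ACIS-S"))  # already text; returned unchanged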
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -21,7 +21,9 @@
get_output_filename, \
ensure_list, \
iterable
-from yt.units.yt_array import array_like_field
+from yt.units.yt_array import \
+ array_like_field, \
+ YTQuantity
from yt.units.unit_object import Unit
from yt.data_objects.data_containers import YTFieldData
from yt.utilities.lib.misc_utilities import \
@@ -35,6 +37,22 @@
NGPDeposit_2
+def _sanitize_min_max_units(amin, amax, finfo, registry):
+ # returns a copy of amin and amax, converted to finfo's output units
+ umin = getattr(amin, 'units', None)
+ umax = getattr(amax, 'units', None)
+ if umin is None:
+ umin = Unit(finfo.output_units, registry=registry)
+ rmin = YTQuantity(amin, umin)
+ else:
+ rmin = amin.in_units(finfo.output_units)
+ if umax is None:
+ umax = Unit(finfo.output_units, registry=registry)
+ rmax = YTQuantity(amax, umax)
+ else:
+ rmax = amax.in_units(finfo.output_units)
+ return rmin, rmax
+
def preserve_source_parameters(func):
def save_state(*args, **kwargs):
# Temporarily replace the 'field_parameters' for a
@@ -67,6 +85,7 @@
self.data_source = data_source
self.ds = data_source.ds
self.field_map = {}
+ self.field_info = {}
self.field_data = YTFieldData()
if weight_field is not None:
self.variance = YTFieldData()
@@ -85,6 +104,8 @@
"""
fields = self.data_source._determine_fields(fields)
+ for f in fields:
+ self.field_info[f] = self.data_source.ds.field_info[f]
temp_storage = ProfileFieldAccumulator(len(fields), self.size)
citer = self.data_source.chunks([], "io")
for chunk in parallel_objects(citer):
@@ -372,9 +393,11 @@
x_n : integer
The number of bins along the x direction.
x_min : float
- The minimum value of the x profile field.
+ The minimum value of the x profile field. If supplied without units,
+ assumed to be in the output units for x_field.
x_max : float
- The maximum value of the x profile field.
+ The maximum value of the x profile field. If supplied without units,
+ assumed to be in the output units for x_field.
x_log : boolean
Controls whether or not the bins for the x field are evenly
spaced in linear (False) or log (True) space.
@@ -385,8 +408,12 @@
def __init__(self, data_source, x_field, x_n, x_min, x_max, x_log,
weight_field = None):
super(Profile1D, self).__init__(data_source, weight_field)
- self.x_field = x_field
+ self.x_field = data_source._determine_fields(x_field)[0]
+ self.field_info[self.x_field] = \
+ self.data_source.ds.field_info[self.x_field]
self.x_log = x_log
+ x_min, x_max = _sanitize_min_max_units(
+ x_min, x_max, self.field_info[self.x_field], self.ds.unit_registry)
self.x_bins = array_like_field(data_source,
self._get_bins(x_min, x_max, x_n, x_log),
self.x_field)
@@ -442,9 +469,11 @@
x_n : integer
The number of bins along the x direction.
x_min : float
- The minimum value of the x profile field.
+ The minimum value of the x profile field. If supplied without units,
+ assumed to be in the output units for x_field.
x_max : float
- The maximum value of the x profile field.
+ The maximum value of the x profile field. If supplied without units,
+ assumed to be in the output units for x_field.
x_log : boolean
Controls whether or not the bins for the x field are evenly
spaced in linear (False) or log (True) space.
@@ -453,9 +482,11 @@
y_n : integer
The number of bins along the y direction.
y_min : float
- The minimum value of the y profile field.
+ The minimum value of the y profile field. If supplied without units,
+ assumed to be in the output units for y_field.
y_max : float
- The maximum value of the y profile field.
+ The maximum value of the y profile field. If supplied without units,
+ assumed to be in the output units for y_field.
y_log : boolean
Controls whether or not the bins for the y field are evenly
spaced in linear (False) or log (True) space.
@@ -469,14 +500,22 @@
weight_field = None):
super(Profile2D, self).__init__(data_source, weight_field)
# X
- self.x_field = x_field
+ self.x_field = data_source._determine_fields(x_field)[0]
self.x_log = x_log
+ self.field_info[self.x_field] = \
+ self.data_source.ds.field_info[self.x_field]
+ x_min, x_max = _sanitize_min_max_units(
+ x_min, x_max, self.field_info[self.x_field], self.ds.unit_registry)
self.x_bins = array_like_field(data_source,
self._get_bins(x_min, x_max, x_n, x_log),
self.x_field)
# Y
- self.y_field = y_field
+ self.y_field = data_source._determine_fields(y_field)[0]
self.y_log = y_log
+ self.field_info[self.y_field] = \
+ self.data_source.ds.field_info[self.y_field]
+ y_min, y_max = _sanitize_min_max_units(
+ y_min, y_max, self.field_info[self.y_field], self.ds.unit_registry)
self.y_bins = array_like_field(data_source,
self._get_bins(y_min, y_max, y_n, y_log),
self.y_field)
@@ -550,17 +589,21 @@
x_n : integer
The number of bins along the x direction.
x_min : float
- The minimum value of the x profile field.
+ The minimum value of the x profile field. If supplied without units,
+ assumed to be in the output units for x_field.
x_max : float
- The maximum value of the x profile field.
+ The maximum value of the x profile field. If supplied without units,
+ assumed to be in the output units for x_field.
y_field : string field name
The field to profile as a function of along the y axis
y_n : integer
The number of bins along the y direction.
y_min : float
- The minimum value of the y profile field.
+ The minimum value of the y profile field. If supplied without units,
+ assumed to be in the output units for y_field.
y_max : float
- The maximum value of the y profile field.
+ The maximum value of the y profile field. If supplied without units,
+ assumed to be in the output units for y_field.
weight_field : string field name
The field to use for weighting. Default is None.
deposition : string, optional
@@ -661,9 +704,11 @@
x_n : integer
The number of bins along the x direction.
x_min : float
- The minimum value of the x profile field.
+ The minimum value of the x profile field. If supplied without units,
+ assumed to be in the output units for x_field.
x_max : float
- The maximum value of the x profile field.
+ The maximum value of the x profile field. If supplied without units,
+ assumed to be in the output units for x_field.
x_log : boolean
Controls whether or not the bins for the x field are evenly
spaced in linear (False) or log (True) space.
@@ -672,9 +717,11 @@
y_n : integer
The number of bins along the y direction.
y_min : float
- The minimum value of the y profile field.
+ The minimum value of the y profile field. If supplied without units,
+ assumed to be in the output units for y_field.
y_max : float
- The maximum value of the y profile field.
+ The maximum value of the y profile field. If supplied without units,
+ assumed to be in the output units for y_field.
y_log : boolean
Controls whether or not the bins for the y field are evenly
spaced in linear (False) or log (True) space.
@@ -683,9 +730,11 @@
z_n : integer
The number of bins along the z direction.
z_min : float
- The minimum value of the z profile field.
+ The minimum value of the z profile field. If supplied without units,
+ assumed to be in the output units for z_field.
z_max : float
- The maximum value of thee z profile field.
+ The maximum value of the z profile field. If supplied without units,
+ assumed to be in the output units for z_field.
z_log : boolean
Controls whether or not the bins for the z field are evenly
spaced in linear (False) or log (True) space.
@@ -700,20 +749,32 @@
weight_field = None):
super(Profile3D, self).__init__(data_source, weight_field)
# X
- self.x_field = x_field
+ self.x_field = data_source._determine_fields(x_field)[0]
self.x_log = x_log
+ self.field_info[self.x_field] = \
+ self.data_source.ds.field_info[self.x_field]
+ x_min, x_max = _sanitize_min_max_units(
+ x_min, x_max, self.field_info[self.x_field], self.ds.unit_registry)
self.x_bins = array_like_field(data_source,
self._get_bins(x_min, x_max, x_n, x_log),
self.x_field)
# Y
- self.y_field = y_field
+ self.y_field = data_source._determine_fields(y_field)[0]
self.y_log = y_log
+ self.field_info[self.y_field] = \
+ self.data_source.ds.field_info[self.y_field]
+ y_min, y_max = _sanitize_min_max_units(
+ y_min, y_max, self.field_info[self.y_field], self.ds.unit_registry)
self.y_bins = array_like_field(data_source,
self._get_bins(y_min, y_max, y_n, y_log),
self.y_field)
# Z
- self.z_field = z_field
+ self.z_field = data_source._determine_fields(z_field)[0]
self.z_log = z_log
+ self.field_info[self.z_field] = \
+ self.data_source.ds.field_info[self.z_field]
+ z_min, z_max = _sanitize_min_max_units(
+ z_min, z_max, self.field_info[self.z_field], self.ds.unit_registry)
self.z_bins = array_like_field(data_source,
self._get_bins(z_min, z_max, z_n, z_log),
self.z_field)
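With _sanitize_min_max_units in place, profile extrema may now carry units; bare floats keep the old behavior of being interpreted in the field's output units. A minimal sketch of the two code paths, assuming a density field whose output units are g/cm**3:

    from yt.units.yt_array import YTQuantity

    # Bare float: assumed to already be in the field's output units.
    x_min = 1.0e-28                        # read as 1.0e-28 g/cm**3

    # Quantity with units attached: converted explicitly before binning.
    x_min_mks = YTQuantity(1.0e-25, "kg/m**3")
    print(x_min_mks.in_units("g/cm**3"))   # -> 1.0e-28 g/cm**3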
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -22,48 +22,54 @@
["density", "temperature", "dinosaurs"])
rt, tt, dt = dd.quantities["TotalQuantity"](
["density", "temperature", "dinosaurs"])
- # First we look at the
+
e1, e2 = 0.9, 1.1
for nb in [8, 16, 32, 64]:
- # We log all the fields or don't log 'em all. No need to do them
- # individually.
- for lf in [True, False]:
- direct_profile = Profile1D(
- dd, "density", nb, rmi*e1, rma*e2, lf, weight_field = None)
- direct_profile.add_fields(["ones", "temperature"])
+ for input_units in ['mks', 'cgs']:
+ for ex in [rmi, rma, tmi, tma, dmi, dma]:
+ getattr(ex, 'convert_to_%s' % input_units)()
+ # We log all the fields or don't log 'em all. No need to do them
+ # individually.
+ for lf in [True, False]:
+ direct_profile = Profile1D(
+ dd, "density", nb, rmi*e1, rma*e2, lf, weight_field=None)
+ direct_profile.add_fields(["ones", "temperature"])
- indirect_profile_s = create_profile(
- dd, "density", ["ones", "temperature"], n_bins=nb,
- extrema={'density': (rmi*e1, rma*e2)}, logs={'density': lf},
- weight_field=None)
+ indirect_profile_s = create_profile(
+ dd, "density", ["ones", "temperature"], n_bins=nb,
+ extrema={'density': (rmi*e1, rma*e2)}, logs={'density': lf},
+ weight_field=None)
- indirect_profile_t = create_profile(
- dd, ("gas", "density"),
- [("index", "ones"), ("gas", "temperature")], n_bins=nb,
- extrema={'density': (rmi*e1, rma*e2)}, logs={'density': lf},
- weight_field=None)
+ indirect_profile_t = create_profile(
+ dd, ("gas", "density"),
+ [("index", "ones"), ("gas", "temperature")], n_bins=nb,
+ extrema={'density': (rmi*e1, rma*e2)}, logs={'density': lf},
+ weight_field=None)
- for p1d in [direct_profile, indirect_profile_s,
- indirect_profile_t]:
- yield assert_equal, p1d["index", "ones"].sum(), nv
- yield assert_rel_equal, tt, p1d["gas", "temperature"].sum(), 7
+ for p1d in [direct_profile, indirect_profile_s,
+ indirect_profile_t]:
+ yield assert_equal, p1d["index", "ones"].sum(), nv
+ yield assert_rel_equal, tt, \
+ p1d["gas", "temperature"].sum(), 7
- p2d = Profile2D(dd,
- "density", nb, rmi*e1, rma*e2, lf,
- "temperature", nb, tmi*e1, tma*e2, lf,
- weight_field = None)
- p2d.add_fields(["ones", "temperature"])
- yield assert_equal, p2d["ones"].sum(), nv
- yield assert_rel_equal, tt, p2d["temperature"].sum(), 7
+ p2d = Profile2D(
+ dd,
+ "density", nb, rmi*e1, rma*e2, lf,
+ "temperature", nb, tmi*e1, tma*e2, lf,
+ weight_field=None)
+ p2d.add_fields(["ones", "temperature"])
+ yield assert_equal, p2d["ones"].sum(), nv
+ yield assert_rel_equal, tt, p2d["temperature"].sum(), 7
- p3d = Profile3D(dd,
- "density", nb, rmi*e1, rma*e2, lf,
- "temperature", nb, tmi*e1, tma*e2, lf,
- "dinosaurs", nb, dmi*e1, dma*e2, lf,
- weight_field = None)
- p3d.add_fields(["ones", "temperature"])
- yield assert_equal, p3d["ones"].sum(), nv
- yield assert_rel_equal, tt, p3d["temperature"].sum(), 7
+ p3d = Profile3D(
+ dd,
+ "density", nb, rmi*e1, rma*e2, lf,
+ "temperature", nb, tmi*e1, tma*e2, lf,
+ "dinosaurs", nb, dmi*e1, dma*e2, lf,
+ weight_field=None)
+ p3d.add_fields(["ones", "temperature"])
+ yield assert_equal, p3d["ones"].sum(), nv
+ yield assert_rel_equal, tt, p3d["temperature"].sum(), 7
p1d = Profile1D(dd, "x", nb, 0.0, 1.0, False,
weight_field = None)
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -41,9 +41,9 @@
for axis in [0, 1, 2]:
for weight_field in [None, "density"]:
yield PixelizedProjectionValuesTest(
- sizmbhloz, axis, field, weight_field,
+ ds, axis, field, weight_field,
dobj_name)
- yield FieldValuesTest(sizmbhloz, field, dobj_name)
+ yield FieldValuesTest(ds, field, dobj_name)
dobj = create_obj(ds, dobj_name)
s1 = dobj["ones"].sum()
s2 = sum(mask.sum() for block, mask in dobj.blocks)
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -34,7 +34,7 @@
def test_cloud():
ds = data_dir_load(cloud)
yield assert_equal, str(ds), "Cloud.0050"
- for test in small_patch_amr(cloud, _fields_cloud):
+ for test in small_patch_amr(ds, _fields_cloud):
test_cloud.__name__ = test.description
yield test
@@ -45,7 +45,7 @@
def test_blast():
ds = data_dir_load(blast)
yield assert_equal, str(ds), "Blast.0100"
- for test in small_patch_amr(blast, _fields_blast):
+ for test in small_patch_amr(ds, _fields_blast):
test_blast.__name__ = test.description
yield test
@@ -60,7 +60,7 @@
def test_stripping():
ds = data_dir_load(stripping, kwargs={"units_override":uo_stripping})
yield assert_equal, str(ds), "rps.0062"
- for test in small_patch_amr(stripping, _fields_stripping):
+ for test in small_patch_amr(ds, _fields_stripping):
test_stripping.__name__ = test.description
yield test
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/frontends/boxlib/tests/test_orion.py
--- a/yt/frontends/boxlib/tests/test_orion.py
+++ b/yt/frontends/boxlib/tests/test_orion.py
@@ -32,7 +32,7 @@
def test_radadvect():
ds = data_dir_load(radadvect)
yield assert_equal, str(ds), "plt00000"
- for test in small_patch_amr(radadvect, _fields):
+ for test in small_patch_amr(ds, _fields):
test_radadvect.__name__ = test.description
yield test
@@ -41,7 +41,7 @@
def test_radtube():
ds = data_dir_load(rt)
yield assert_equal, str(ds), "plt00500"
- for test in small_patch_amr(rt, _fields):
+ for test in small_patch_amr(ds, _fields):
test_radtube.__name__ = test.description
yield test
@@ -50,7 +50,7 @@
def test_star():
ds = data_dir_load(star)
yield assert_equal, str(ds), "plrd01000"
- for test in small_patch_amr(star, _fields):
+ for test in small_patch_amr(ds, _fields):
test_star.__name__ = test.description
yield test
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -34,7 +34,7 @@
def test_gc():
ds = data_dir_load(gc)
yield assert_equal, str(ds), "data.0077.3d.hdf5"
- for test in small_patch_amr(gc, _fields):
+ for test in small_patch_amr(ds, _fields):
test_gc.__name__ = test.description
yield test
@@ -43,7 +43,7 @@
def test_tb():
ds = data_dir_load(tb)
yield assert_equal, str(ds), "data.0005.3d.hdf5"
- for test in small_patch_amr(tb, _fields):
+ for test in small_patch_amr(ds, _fields):
test_tb.__name__ = test.description
yield test
@@ -52,7 +52,7 @@
def test_iso():
ds = data_dir_load(iso)
yield assert_equal, str(ds), "data.0000.3d.hdf5"
- for test in small_patch_amr(iso, _fields):
+ for test in small_patch_amr(ds, _fields):
test_iso.__name__ = test.description
yield test
@@ -62,7 +62,7 @@
def test_zp():
ds = data_dir_load(zp)
yield assert_equal, str(ds), "plt32.2d.hdf5"
- for test in small_patch_amr(zp, _zp_fields, input_center="c",
+ for test in small_patch_amr(ds, _zp_fields, input_center="c",
input_weight="rhs"):
test_zp.__name__ = test.description
yield test
@@ -72,7 +72,7 @@
def test_kho():
ds = data_dir_load(kho)
yield assert_equal, str(ds), "data.0004.hdf5"
- for test in small_patch_amr(kho, _fields):
+ for test in small_patch_amr(ds, _fields):
test_kho.__name__ = test.description
yield test
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -74,7 +74,7 @@
ds = data_dir_load(g30)
yield check_color_conservation(ds)
yield assert_equal, str(ds), "galaxy0030"
- for test in big_patch_amr(g30, _fields):
+ for test in big_patch_amr(ds, _fields):
test_galaxy0030.__name__ = test.description
yield test
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -158,7 +158,7 @@
return []
def _get_unique_identifier(self):
- return self.parameter_filename.__hash__()
+ return self.parameter_filename
def _get_current_time(self):
try:
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/frontends/exodus_ii/tests/test_outputs.py
--- a/yt/frontends/exodus_ii/tests/test_outputs.py
+++ b/yt/frontends/exodus_ii/tests/test_outputs.py
@@ -26,12 +26,20 @@
@requires_file(out)
def test_out():
ds = data_dir_load(out)
+ field_list = [('connect1', 'conv_indicator'),
+ ('connect1', 'conv_marker'),
+ ('connect1', 'convected'),
+ ('connect1', 'diffused'),
+ ('connect2', 'conv_indicator'),
+ ('connect2', 'conv_marker'),
+ ('connect2', 'convected'),
+ ('connect2', 'diffused')]
yield assert_equal, str(ds), "out.e"
yield assert_equal, ds.dimensionality, 3
- yield assert_equal, ds.unique_identifier, 5081193338833632556
yield assert_equal, ds.current_time, 0.0
yield assert_array_equal, ds.parameters['nod_names'], ['convected', 'diffused']
yield assert_equal, ds.parameters['num_meshes'], 2
+ yield assert_array_equal, ds.field_list, field_list
out_s002 = "ExodusII/out.e-s002"
@@ -39,9 +47,18 @@
@requires_file(out_s002)
def test_out002():
ds = data_dir_load(out_s002)
+ field_list = [('connect1', 'conv_indicator'),
+ ('connect1', 'conv_marker'),
+ ('connect1', 'convected'),
+ ('connect1', 'diffused'),
+ ('connect2', 'conv_indicator'),
+ ('connect2', 'conv_marker'),
+ ('connect2', 'convected'),
+ ('connect2', 'diffused')]
yield assert_equal, str(ds), "out.e-s002"
yield assert_equal, ds.dimensionality, 3
yield assert_equal, ds.current_time, 2.0
+ yield assert_array_equal, ds.field_list, field_list
gold = "ExodusII/gold.e"
@@ -49,4 +66,6 @@
@requires_file(gold)
def test_gold():
ds = data_dir_load(gold)
+ field_list = [('connect1', 'forced')]
yield assert_equal, str(ds), "gold.e"
+ yield assert_array_equal, ds.field_list, field_list
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/frontends/exodus_ii/util.py
--- a/yt/frontends/exodus_ii/util.py
+++ b/yt/frontends/exodus_ii/util.py
@@ -1,10 +1,15 @@
+import sys
import string
from itertools import takewhile
from collections import OrderedDict
import re
+_printable = set([ord(_) for _ in string.printable])
+
def sanitize_string(s):
- return "".join(_ for _ in takewhile(lambda a: a in string.printable, s))
+ if sys.version_info > (3, ):
+ return "".join([chr(_) for _ in takewhile(lambda a: a in _printable, s)])
+ return "".join([_ for _ in takewhile(lambda a: a in string.printable, s)])
def load_info_records(info_records):
info_records_parsed = [sanitize_string(line_chars) for line_chars in info_records]
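The separate Python 3 branch is needed because iterating over bytes yields integers rather than one-character strings, so membership has to be tested against ordinals. A sketch of the intended behavior, assuming a NUL-padded info record of the kind ExodusII files store:

    import sys
    import string
    from itertools import takewhile

    _printable = set([ord(_) for _ in string.printable])

    def sanitize_string(s):
        # Same function as in the diff above.
        if sys.version_info > (3, ):
            return "".join([chr(_) for _ in takewhile(lambda a: a in _printable, s)])
        return "".join([_ for _ in takewhile(lambda a: a in string.printable, s)])

    print(sanitize_string(b"mesh info\x00\x00\x00"))  # -> 'mesh info'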
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/frontends/fits/tests/test_outputs.py
--- a/yt/frontends/fits/tests/test_outputs.py
+++ b/yt/frontends/fits/tests/test_outputs.py
@@ -30,7 +30,7 @@
def test_grs():
ds = data_dir_load(grs, cls=FITSDataset, kwargs={"nan_mask":0.0})
yield assert_equal, str(ds), "grs-50-cube.fits"
- for test in small_patch_amr(grs, _fields_grs, input_center="c", input_weight="ones"):
+ for test in small_patch_amr(ds, _fields_grs, input_center="c", input_weight="ones"):
test_grs.__name__ = test.description
yield test
@@ -41,7 +41,7 @@
def test_velocity_field():
ds = data_dir_load(vf, cls=FITSDataset)
yield assert_equal, str(ds), "velocity_field_20.fits"
- for test in small_patch_amr(vf, _fields_vels, input_center="c", input_weight="ones"):
+ for test in small_patch_amr(ds, _fields_vels, input_center="c", input_weight="ones"):
test_velocity_field.__name__ = test.description
yield test
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -30,7 +30,7 @@
def test_sloshing():
ds = data_dir_load(sloshing)
yield assert_equal, str(ds), "sloshing_low_res_hdf5_plt_cnt_0300"
- for test in small_patch_amr(sloshing, _fields):
+ for test in small_patch_amr(ds, _fields):
test_sloshing.__name__ = test.description
yield test
@@ -41,7 +41,7 @@
def test_wind_tunnel():
ds = data_dir_load(wt)
yield assert_equal, str(ds), "windtunnel_4lev_hdf5_plt_cnt_0030"
- for test in small_patch_amr(wt, _fields_2d):
+ for test in small_patch_amr(ds, _fields_2d):
test_wind_tunnel.__name__ = test.description
yield test
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/frontends/gadget/tests/test_outputs.py
--- a/yt/frontends/gadget/tests/test_outputs.py
+++ b/yt/frontends/gadget/tests/test_outputs.py
@@ -25,7 +25,7 @@
isothermal_h5 = "IsothermalCollapse/snap_505.hdf5"
isothermal_bin = "IsothermalCollapse/snap_505"
-gdg = "GadgetDiskGalaxy/snapshot_200.hdf5"
+g64 = "gizmo_64/output/snap_N64L16_135.hdf5"
# This maps from field names to weight field names to use for projections
iso_fields = OrderedDict(
@@ -42,9 +42,9 @@
)
iso_kwargs = dict(bounding_box=[[-3, 3], [-3, 3], [-3, 3]])
-gdg_fields = iso_fields.copy()
-gdg_fields["deposit", "PartType4_density"] = None
-gdg_kwargs = dict(bounding_box=[[-1e5, 1e5], [-1e5, 1e5], [-1e5, 1e5]])
+g64_fields = iso_fields.copy()
+g64_fields["deposit", "PartType4_density"] = None
+g64_kwargs = dict(bounding_box=[[-1e5, 1e5], [-1e5, 1e5], [-1e5, 1e5]])
@requires_file(isothermal_h5)
@@ -58,14 +58,14 @@
@requires_ds(isothermal_h5)
def test_iso_collapse():
- for test in sph_answer(isothermal_h5, 'snap_505', 2**17,
- iso_fields, ds_kwargs=iso_kwargs):
+ ds = data_dir_load(isothermal_h5, kwargs=iso_kwargs)
+ for test in sph_answer(ds, 'snap_505', 2**17, iso_fields):
test_iso_collapse.__name__ = test.description
yield test
-@requires_ds(gdg, big_data=True)
-def test_gadget_disk_galaxy():
- for test in sph_answer(gdg, 'snapshot_200', 11907080, gdg_fields,
- ds_kwargs=gdg_kwargs):
- test_gadget_disk_galaxy.__name__ = test.description
+@requires_ds(g64, big_data=True)
+def test_gizmo_64():
+ ds = data_dir_load(g64, kwargs=g64_kwargs)
+ for test in sph_answer(ds, 'snap_N64L16_135', 524288, g64_fields):
+ test_gizmo_64.__name__ = test.description
yield test
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/frontends/owls/tests/test_outputs.py
--- a/yt/frontends/owls/tests/test_outputs.py
+++ b/yt/frontends/owls/tests/test_outputs.py
@@ -45,7 +45,8 @@
@requires_ds(os33, big_data=True)
def test_snapshot_033():
- for test in sph_answer(os33, 'snap_033', 2*128**3, _fields):
+ ds = data_dir_load(os33)
+ for test in sph_answer(ds, 'snap_033', 2*128**3, _fields):
test_snapshot_033.__name__ = test.description
yield test
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -584,16 +584,17 @@
>>> dd = ds.all_data()
>>> dd['density']
- #FIXME
- YTArray[123.2856, 123.854, ..., 123.456, 12.42] (code_mass/code_length^3)
+ YTArray([ 0.87568064, 0.33686453, 0.70467189, ..., 0.70439916,
+ 0.97506269, 0.03047113]) g/cm**3
- >>> data = dict(density=(arr, 'g/cm**3'))
- >>> ds = load_uniform_grid(data, arr.shape, 3.03e24, bbox=bbox, nprocs=12)
+ >>> data = dict(density=(arr, 'kg/m**3'))
+ >>> ds = load_uniform_grid(data, arr.shape, length_unit=3.03e24,
+ ... bbox=bbox, nprocs=12)
>>> dd = ds.all_data()
>>> dd['density']
- #FIXME
- YTArray[123.2856, 123.854, ..., 123.456, 12.42] (g/cm**3)
+ YTArray([ 8.75680644e-04, 3.36864527e-04, 7.04671886e-04, ...,
+ 7.04399160e-04, 9.75062693e-04, 3.04711295e-05]) g/cm**3
"""
@@ -1618,32 +1619,36 @@
visualization will be present, and some analysis functions may not yet have
been implemented.
- Particle fields are detected as one-dimensional fields. The number of particles
- is set by the "number_of_particles" key in data.
+ Particle fields are detected as one-dimensional fields. The number of
+ particles is set by the "number_of_particles" key in data.
+
+ In the parameter descriptions below, a "vertex" is a 3D point in space, an
+ "element" is a single polyhedron whose location is defined by a set of
+ vertices, and a "mesh" is a set of polyhedral elements, each with the same
+ number of vertices.
Parameters
----------
connectivity : list of array_like or array_like
- This is the connectivity array for the meshes; this should either be a
- list where each element in the list is a numpy array or a single numpy
- array. Each array in the list can have different connectivity length
- and should be of shape (N,M) where N is the number of elements and M is
- the connectivity length.
+ This should either be a single 2D array or list of 2D arrays. If this
+ is a list, each element in the list corresponds to the connectivity
+ information for a distinct mesh. Each array can have different
+ connectivity length and should be of shape (N,M) where N is the number
+ of elements and M is the number of vertices per element.
coordinates : array_like
- This should be of size (L,3) where L is the number of vertices
- indicated in the connectivity matrix.
+ The 3D coordinates of mesh vertices. This should be of size (L,3) where
+ L is the number of vertices. When loading more than one mesh, the data
+ for each mesh should be concatenated into a single coordinates array.
node_data : dict or list of dicts
- This is a list of dicts of numpy arrays, where each element in the list
- is a different mesh, and where the keys of dicts are the field names.
- If a dict is supplied, this will be assumed to be the only mesh. These
- data fields are assumed to be node-centered, i.e. there must be one
- value for every node in the mesh.
+ For a single mesh, a dict mapping field names to 2D numpy arrays,
+ representing data defined at element vertices. For multiple meshes,
+ this must be a list of dicts.
elem_data : dict or list of dicts
- This is a list of dicts of numpy arrays, where each element in the list
- is a different mesh, and where the keys of dicts are the field names.
- If a dict is supplied, this will be assumed to be the only mesh. These
- data fields are assumed to be element-centered, i.e. there must be only
- one value for every element in the mesh.
+ For a single mesh, a dict mapping field names to 1D numpy arrays, where
+ each array has a length equal to the number of elements. The data
+ must be defined at the center of each mesh element and there must be
+ only one data value for each element. For multiple meshes, this must be
+ a list of dicts, with one dict for each mesh.
bbox : array_like (xdim:zdim, LE:RE), optional
Size of computational domain in units of the length unit.
sim_time : float, optional
@@ -1664,9 +1669,32 @@
"spectral_cube". Optionally, a tuple can be provided to specify the
axis ordering -- for instance, to specify that the axis ordering should
be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same
- can be done for other coordinates, for instance:
+ can be done for other coordinates, for instance:
("spherical", ("theta", "phi", "r")).
+ >>> # Coordinates for vertices of two tetrahedra
+ >>> coordinates = np.array([[0.0, 0.0, 0.5], [0.0, 1.0, 0.5], [0.5, 1, 0.5],
+ [0.5, 0.5, 0.0], [0.5, 0.5, 1.0]])
+
+ >>> # The indices in the coordinates array of mesh vertices.
+ >>> # This mesh has two elements.
+ >>> connectivity = np.array([[0, 1, 2, 4], [0, 1, 2, 3]])
+
+ >>> # Field data defined at the centers of the two mesh elements.
+ >>> elem_data = {
+ ... ('connect1', 'elem_field'): np.array([1, 2])
+ ... }
+
+ >>> # Field data defined at node vertices
+ >>> node_data = {
+ ... ('connect1', 'node_field'): np.array([[0.0, 1.0, 2.0, 4.0],
+ ... [0.0, 1.0, 2.0, 3.0]])
+ ... }
+
+ >>> ds = yt.load_unstructured_mesh(connectivity, coordinates,
+ ... elem_data=elem_data,
+ ... node_data=node_data)
+
"""
domain_dimensions = np.ones(3, "int32") * 2
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/frontends/tipsy/tests/test_outputs.py
--- a/yt/frontends/tipsy/tests/test_outputs.py
+++ b/yt/frontends/tipsy/tests/test_outputs.py
@@ -110,7 +110,8 @@
tipsy_gal = 'TipsyGalaxy/galaxy.00300'
@requires_ds(tipsy_gal)
def test_tipsy_galaxy():
- for test in sph_answer(tipsy_gal, 'galaxy.00300', 315372, tg_fields):
+ ds = data_dir_load(tipsy_gal)
+ for test in sph_answer(ds, 'galaxy.00300', 315372, tg_fields):
test_tipsy_galaxy.__name__ = test.description
yield test
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/tests/test_flake8.py
--- a/yt/tests/test_flake8.py
+++ b/yt/tests/test_flake8.py
@@ -8,14 +8,15 @@
@requires_module('flake8')
def test_flake8():
yt_dir = os.path.dirname(os.path.abspath(yt.__file__))
- initial_dir = os.getcwd()
- os.chdir(yt_dir)
- output_file = os.path.sep.join([os.path.dirname(initial_dir), 'flake8.out'])
+ output_file = os.environ.get("WORKSPACE", None) or os.getcwd()
+ output_file = os.path.join(output_file, 'flake8.out')
if os.path.exists(output_file):
os.remove(output_file)
output_string = "--output-file=%s" % output_file
- subprocess.call(['flake8', output_string, os.curdir])
- os.chdir(initial_dir)
+ config_string = "--config=%s" % os.path.join(os.path.dirname(yt_dir),
+ 'setup.cfg')
+ subprocess.call(['flake8', output_string, config_string, yt_dir])
+
with open(output_file) as f:
flake8_output = f.readlines()
if flake8_output != []:
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -671,7 +671,11 @@
err_msg=err_msg, verbose=True)
def compare_image_lists(new_result, old_result, decimals):
- fns = ['old.png', 'new.png']
+ fns = []
+ for i in range(2):
+ tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+ os.close(tmpfd)
+ fns.append(tmpname)
num_images = len(old_result)
assert(num_images > 0)
for i in range(num_images):
@@ -917,12 +921,9 @@
dobj_name)
-def sph_answer(ds_fn, ds_str_repr, ds_nparticles, fields, ds_kwargs=None):
- if not can_run_ds(ds_fn):
+def sph_answer(ds, ds_str_repr, ds_nparticles, fields):
+ if not can_run_ds(ds):
return
- if ds_kwargs is None:
- ds_kwargs = {}
- ds = data_dir_load(ds_fn, kwargs=ds_kwargs)
yield AssertWrapper("%s_string_representation" % str(ds), assert_equal,
str(ds), ds_str_repr)
dso = [None, ("sphere", ("c", (0.1, 'unitary')))]
@@ -946,9 +947,9 @@
for axis in [0, 1, 2]:
if particle_type is False:
yield PixelizedProjectionValuesTest(
- ds_fn, axis, field, weight_field,
+ ds, axis, field, weight_field,
dobj_name)
- yield FieldValuesTest(ds_fn, field, dobj_name,
+ yield FieldValuesTest(ds, field, dobj_name,
particle_type=particle_type)
def create_obj(ds, obj_type):
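After this change sph_answer expects an already-loaded dataset rather than a filename, so each test module loads its data exactly once and reuses it. The updated call pattern, using the os33 and _fields names from the owls test module shown earlier in this changeset:

    from yt.utilities.answer_testing.framework import data_dir_load, sph_answer

    def test_snapshot_033():
        ds = data_dir_load(os33)          # load once...
        for test in sph_answer(ds, 'snap_033', 2*128**3, _fields):
            test_snapshot_033.__name__ = test.description
            yield test                    # ...and reuse for every sub-test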
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -6,6 +6,8 @@
import subprocess
import shutil
import pkg_resources
+from sys import platform as _platform
+
def check_for_openmp():
"""Returns True if local setup supports OpenMP, False otherwise"""
@@ -101,6 +103,10 @@
embree_include_dir = loc + '/include'
embree_lib_dir = loc + '/lib'
embree_args = ['-I/' + embree_include_dir, '-L/' + embree_lib_dir]
+ if _platform == "darwin":
+ embree_lib_name = "embree.2"
+ else:
+ embree_lib_name = "embree"
else:
embree_args = None
# Because setjmp.h is included by lots of things, and because libpng hasn't
@@ -222,14 +228,14 @@
config.add_extension("mesh_construction",
["yt/utilities/lib/mesh_construction.pyx"],
include_dirs=["yt/utilities/lib", include_dirs],
- libraries=["m", "embree"], language="c++",
+ libraries=["m", embree_lib_name], language="c++",
extra_compile_args=embree_args,
extra_link_args=embree_args,
depends=["yt/utilities/lib/mesh_construction.pxd"])
config.add_extension("mesh_traversal",
["yt/utilities/lib/mesh_traversal.pyx"],
include_dirs=["yt/utilities/lib", include_dirs],
- libraries=["m", "embree"], language="c++",
+ libraries=["m", embree_lib_name], language="c++",
extra_compile_args=embree_args,
extra_link_args=embree_args,
depends=["yt/utilities/lib/mesh_traversal.pxd",
@@ -237,7 +243,7 @@
config.add_extension("mesh_samplers",
["yt/utilities/lib/mesh_samplers.pyx"],
include_dirs=["yt/utilities/lib", include_dirs],
- libraries=["m", "embree"], language="c++",
+ libraries=["m", embree_lib_name], language="c++",
extra_compile_args=embree_args,
extra_link_args=embree_args,
depends=["yt/utilities/lib/mesh_samplers.pxd",
@@ -245,7 +251,7 @@
config.add_extension("mesh_intersection",
["yt/utilities/lib/mesh_intersection.pyx"],
include_dirs=["yt/utilities/lib", include_dirs],
- libraries=["m", "embree"], language="c++",
+ libraries=["m", embree_lib_name], language="c++",
extra_compile_args=embree_args,
extra_link_args=embree_args,
depends=["yt/utilities/lib/mesh_intersection.pxd"])
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1030,6 +1030,7 @@
field = ensure_list(field)
for f in field:
self.plots[f].hide_colorbar()
+ return self
def show_colorbar(self, field=None):
"""
@@ -1048,6 +1049,7 @@
field = ensure_list(field)
for f in field:
self.plots[f].show_colorbar()
+ return self
def hide_axes(self, field=None):
"""
@@ -1086,6 +1088,7 @@
field = ensure_list(field)
for f in field:
self.plots[f].hide_axes()
+ return self
def show_axes(self, field=None):
"""
@@ -1104,6 +1107,7 @@
field = ensure_list(field)
for f in field:
self.plots[f].show_axes()
+ return self
class AxisAlignedSlicePlot(PWViewerMPL):
r"""Creates a slice plot from a dataset
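Returning self from hide_colorbar, show_colorbar, hide_axes, and show_axes makes these toggles chainable. A small usage sketch, assuming a fake dataset from yt.testing:

    import yt
    from yt.testing import fake_random_ds

    ds = fake_random_ds(32)
    slc = yt.SlicePlot(ds, "z", "density")

    # Each toggle now returns the plot, so calls can be chained:
    slc.hide_colorbar().hide_axes().save("density_slice.png")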
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -17,6 +17,7 @@
from yt.extern.six.moves import builtins
from yt.extern.six.moves import zip as izip
from yt.extern.six import string_types, iteritems
+from collections import OrderedDict
import base64
import os
@@ -59,7 +60,7 @@
canvas_cls = mpl.FigureCanvasAgg
return canvas_cls
-class FigureContainer(dict):
+class FigureContainer(OrderedDict):
def __init__(self):
super(FigureContainer, self).__init__()
@@ -68,7 +69,7 @@
self[key] = figure
return self[key]
-class AxesContainer(dict):
+class AxesContainer(OrderedDict):
def __init__(self, fig_container):
self.fig_container = fig_container
self.ylim = {}
@@ -334,8 +335,7 @@
self.axes[field].plot(np.array(profile.x), np.array(field_data),
label=self.label[i], **self.plot_spec[i])
- # This relies on 'profile' leaking
- for fname, axes in self.axes.items():
+ for (fname, axes), profile in zip(self.axes.items(), self.profiles):
xscale, yscale = self._get_field_log(fname, profile)
xtitle, ytitle = self._get_field_title(fname, profile)
axes.set_xscale(xscale)
@@ -591,9 +591,7 @@
return self
def _get_field_log(self, field_y, profile):
- ds = profile.data_source.ds
- yf, = profile.data_source._determine_fields([field_y])
- yfi = ds._get_field_info(*yf)
+ yfi = profile.field_info[field_y]
if self.x_log is None:
x_log = profile.x_log
else:
@@ -624,12 +622,9 @@
return label
def _get_field_title(self, field_y, profile):
- ds = profile.data_source.ds
field_x = profile.x_field
- xf, yf = profile.data_source._determine_fields(
- [field_x, field_y])
- xfi = ds._get_field_info(*xf)
- yfi = ds._get_field_info(*yf)
+ xfi = profile.field_info[field_x]
+ yfi = profile.field_info[field_y]
x_unit = profile.x.units
y_unit = profile.field_units[field_y]
fractional = profile.fractional
@@ -753,14 +748,11 @@
return obj
def _get_field_title(self, field_z, profile):
- ds = profile.data_source.ds
field_x = profile.x_field
field_y = profile.y_field
- xf, yf, zf = profile.data_source._determine_fields(
- [field_x, field_y, field_z])
- xfi = ds._get_field_info(*xf)
- yfi = ds._get_field_info(*yf)
- zfi = ds._get_field_info(*zf)
+ xfi = profile.field_info[field_x]
+ yfi = profile.field_info[field_y]
+ zfi = profile.field_info[field_z]
x_unit = profile.x.units
y_unit = profile.y.units
z_unit = profile.field_units[field_z]
@@ -791,9 +783,7 @@
return label
def _get_field_log(self, field_z, profile):
- ds = profile.data_source.ds
- zf, = profile.data_source._determine_fields([field_z])
- zfi = ds._get_field_info(*zf)
+ zfi = profile.field_info[field_z]
if self.x_log is None:
x_log = profile.x_log
else:
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/visualization/tests/test_mesh_slices.py
--- a/yt/visualization/tests/test_mesh_slices.py
+++ b/yt/visualization/tests/test_mesh_slices.py
@@ -32,7 +32,7 @@
curdir = os.getcwd()
os.chdir(tmpdir)
- np.random.seed(0451)
+ np.random.seed(0x4d3d3d3)
# tetrahedral ds
ds = fake_tetrahedral_ds()
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/visualization/tests/test_profile_plots.py
--- a/yt/visualization/tests/test_profile_plots.py
+++ b/yt/visualization/tests/test_profile_plots.py
@@ -111,6 +111,12 @@
assert_array_almost_equal(xlim, (0.3, 0.8))
assert_array_almost_equal(ylim, (0.4, 0.6))
phases.append(pp)
+
+ p1 = create_profile(test_ds.all_data(), 'density', 'temperature')
+ p2 = create_profile(test_ds.all_data(), 'density', 'velocity_x')
+ profiles.append(ProfilePlot.from_profiles(
+ [p1, p2], labels=['temperature', 'velocity']))
+
cls.profiles = profiles
cls.phases = phases
cls.ds = test_ds
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/visualization/volume_rendering/tests/test_composite.py
--- a/yt/visualization/volume_rendering/tests/test_composite.py
+++ b/yt/visualization/volume_rendering/tests/test_composite.py
@@ -20,11 +20,6 @@
import numpy as np
from unittest import TestCase
-np.random.seed(0)
-
-# This toggles using a temporary directory. Turn off to examine images.
-use_tmpdir = True
-
def setup():
"""Test specific setup."""
@@ -33,8 +28,12 @@
class CompositeVRTest(TestCase):
+ # This toggles using a temporary directory. Turn off to examine images.
+ use_tmpdir = True
+
def setUp(self):
- if use_tmpdir:
+ np.random.seed(0)
+ if self.use_tmpdir:
self.curdir = os.getcwd()
# Perform I/O in safe place instead of yt main dir
self.tmpdir = tempfile.mkdtemp()
@@ -43,7 +42,7 @@
self.curdir, self.tmpdir = None, None
def tearDown(self):
- if use_tmpdir:
+ if self.use_tmpdir:
os.chdir(self.curdir)
shutil.rmtree(self.tmpdir)
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/visualization/volume_rendering/tests/test_lenses.py
--- a/yt/visualization/volume_rendering/tests/test_lenses.py
+++ b/yt/visualization/volume_rendering/tests/test_lenses.py
@@ -18,10 +18,6 @@
import numpy as np
from unittest import TestCase
-# This toggles using a temporary directory. Turn off to examine images.
-use_tmpdir = True
-
-
def setup():
"""Test specific setup."""
from yt.config import ytcfg
@@ -29,8 +25,11 @@
class LensTest(TestCase):
+ # This toggles using a temporary directory. Turn off to examine images.
+ use_tmpdir = True
+
def setUp(self):
- if use_tmpdir:
+ if self.use_tmpdir:
self.curdir = os.getcwd()
# Perform I/O in safe place instead of yt main dir
self.tmpdir = tempfile.mkdtemp()
@@ -42,7 +41,7 @@
self.ds = fake_random_ds(32, fields=self.field)
def tearDown(self):
- if use_tmpdir:
+ if self.use_tmpdir:
os.chdir(self.curdir)
shutil.rmtree(self.tmpdir)
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/visualization/volume_rendering/tests/test_offaxisprojection.py
--- a/yt/visualization/volume_rendering/tests/test_offaxisprojection.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Test for off_axis_projection and write_projection
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-from yt.testing import \
- fake_random_ds
-from yt.visualization.image_writer import write_projection
-from yt.visualization.volume_rendering.api import off_axis_projection
-
-
-test_ds = fake_random_ds(64)
-c = test_ds.domain_center
-norm = [0.5, 0.5, 0.5]
-W = test_ds.domain_width.max()
-N = 64
-field = ('gas', "density")
-oap_args = [test_ds, c, norm, W, N, field]
-
-image, sc = off_axis_projection(*oap_args)
-write_projection(image, 'oap.png')
-
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/visualization/volume_rendering/tests/test_points.py
--- a/yt/visualization/volume_rendering/tests/test_points.py
+++ b/yt/visualization/volume_rendering/tests/test_points.py
@@ -19,11 +19,6 @@
import numpy as np
from unittest import TestCase
-np.random.seed(0)
-
-# This toggles using a temporary directory. Turn off to examine images.
-use_tmpdir = True
-
def setup():
"""Test specific setup."""
@@ -32,8 +27,12 @@
class PointsVRTest(TestCase):
+ # This toggles using a temporary directory. Turn off to examine images.
+ use_tmpdir = True
+
def setUp(self):
- if use_tmpdir:
+ np.random.seed(0)
+ if self.use_tmpdir:
self.curdir = os.getcwd()
# Perform I/O in safe place instead of yt main dir
self.tmpdir = tempfile.mkdtemp()
@@ -42,7 +41,7 @@
self.curdir, self.tmpdir = None, None
def tearDown(self):
- if use_tmpdir:
+ if self.use_tmpdir:
os.chdir(self.curdir)
shutil.rmtree(self.tmpdir)
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/visualization/volume_rendering/tests/test_scene.py
--- a/yt/visualization/volume_rendering/tests/test_scene.py
+++ b/yt/visualization/volume_rendering/tests/test_scene.py
@@ -21,9 +21,6 @@
import numpy as np
from unittest import TestCase
-# This toggles using a temporary directory. Turn off to examine images.
-use_tmpdir = True
-
def setup():
"""Test specific setup."""
@@ -32,8 +29,12 @@
class RotationTest(TestCase):
+
+ # This toggles using a temporary directory. Turn off to examine images.
+ use_tmpdir = True
+
def setUp(self):
- if use_tmpdir:
+ if self.use_tmpdir:
self.curdir = os.getcwd()
# Perform I/O in safe place instead of yt main dir
self.tmpdir = tempfile.mkdtemp()
@@ -42,7 +43,7 @@
self.curdir, self.tmpdir = None, None
def tearDown(self):
- if use_tmpdir:
+ if self.use_tmpdir:
os.chdir(self.curdir)
shutil.rmtree(self.tmpdir)
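Moving use_tmpdir from module scope onto the TestCase means the toggle can be flipped per test class instead of by editing module state. A sketch, assuming one wants a subclass that keeps rendered images on disk for inspection:

    from yt.visualization.volume_rendering.tests.test_scene import RotationTest

    class RotationTestKeepImages(RotationTest):
        # Override the class attribute so images survive tearDown.
        use_tmpdir = False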
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/visualization/volume_rendering/tests/test_sigma_clip.py
--- a/yt/visualization/volume_rendering/tests/test_sigma_clip.py
+++ b/yt/visualization/volume_rendering/tests/test_sigma_clip.py
@@ -18,10 +18,6 @@
from yt.testing import fake_random_ds
from unittest import TestCase
-# This toggles using a temporary directory. Turn off to examine images.
-use_tmpdir = True
-
-
def setup():
"""Test specific setup."""
from yt.config import ytcfg
@@ -29,8 +25,11 @@
class SigmaClipTest(TestCase):
+ # This toggles using a temporary directory. Turn off to examine images.
+ use_tmpdir = True
+
def setUp(self):
- if use_tmpdir:
+ if self.use_tmpdir:
self.curdir = os.getcwd()
# Perform I/O in safe place instead of yt main dir
self.tmpdir = tempfile.mkdtemp()
@@ -39,7 +38,7 @@
self.curdir, self.tmpdir = None, None
def tearDown(self):
- if use_tmpdir:
+ if self.use_tmpdir:
os.chdir(self.curdir)
shutil.rmtree(self.tmpdir)
@@ -50,5 +49,4 @@
sc.save('raw.png')
sc.save('clip_2.png', sigma_clip=2)
sc.save('clip_4.png', sigma_clip=4.0)
- print(sc)
return im, sc
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/visualization/volume_rendering/tests/test_simple_vr.py
--- a/yt/visualization/volume_rendering/tests/test_simple_vr.py
+++ b/yt/visualization/volume_rendering/tests/test_simple_vr.py
@@ -18,9 +18,6 @@
from yt.testing import fake_random_ds
from unittest import TestCase
-# This toggles using a temporary directory. Turn off to examine images.
-use_tmpdir = True
-
def setup():
"""Test specific setup."""
@@ -29,8 +26,11 @@
class SimpleVRTest(TestCase):
+ # This toggles using a temporary directory. Turn off to examine images.
+ use_tmpdir = True
+
def setUp(self):
- if use_tmpdir:
+ if self.use_tmpdir:
self.curdir = os.getcwd()
# Perform I/O in safe place instead of yt main dir
self.tmpdir = tempfile.mkdtemp()
@@ -39,12 +39,11 @@
self.curdir, self.tmpdir = None, None
def tearDown(self):
- if use_tmpdir:
+ if self.use_tmpdir:
os.chdir(self.curdir)
shutil.rmtree(self.tmpdir)
def test_simple_vr(self):
ds = fake_random_ds(32)
im, sc = yt.volume_render(ds, fname='test.png', sigma_clip=4.0)
- print(sc)
return im, sc
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/visualization/volume_rendering/tests/test_varia.py
--- a/yt/visualization/volume_rendering/tests/test_varia.py
+++ b/yt/visualization/volume_rendering/tests/test_varia.py
@@ -10,45 +10,69 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
+import os
+import tempfile
+import shutil
import numpy as np
import yt
from yt.testing import \
fake_random_ds
+from unittest import TestCase
-def test_simple_scene_creation():
- ds = fake_random_ds(32)
- yt.create_scene(ds)
+def setup():
+ """Test specific setup."""
+ from yt.config import ytcfg
+ ytcfg["yt", "__withintesting"] = "True"
-def test_modify_transfer_function():
- ds = fake_random_ds(32)
- im, sc = yt.volume_render(ds)
+class VariousVRTests(TestCase):
+ # This toggles using a temporary directory. Turn off to examine images.
+ use_tmpdir = True
+
+ def setUp(self):
+ if self.use_tmpdir:
+ self.curdir = os.getcwd()
+ # Perform I/O in safe place instead of yt main dir
+ self.tmpdir = tempfile.mkdtemp()
+ os.chdir(self.tmpdir)
+ else:
+ self.curdir, self.tmpdir = None, None
- volume_source = sc.get_source(0)
- tf = volume_source.transfer_function
- tf.clear()
- tf.grey_opacity = True
- tf.add_layers(3, colormap='RdBu')
- sc.render()
+ self.ds = fake_random_ds(32)
+
+ def tearDown(self):
+ if self.use_tmpdir:
+ os.chdir(self.curdir)
+ shutil.rmtree(self.tmpdir)
-def test_multiple_fields():
- ds = fake_random_ds(32)
- im, sc = yt.volume_render(ds)
+ def test_simple_scene_creation(self):
+ yt.create_scene(self.ds)
- volume_source = sc.get_source(0)
- volume_source.set_fields([('gas', 'velocity_x'), ('gas', 'density')])
- sc.render()
+ def test_modify_transfer_function(self):
+ im, sc = yt.volume_render(self.ds)
-def test_rotation_volume_rendering():
- ds = fake_random_ds(32)
- im, sc = yt.volume_render(ds)
-
- angle = 2*np.pi
- frames = 10
- for i in range(frames):
- sc.camera.yaw(angle/frames)
+ volume_source = sc.get_source(0)
+ tf = volume_source.transfer_function
+ tf.clear()
+ tf.grey_opacity = True
+ tf.add_layers(3, colormap='RdBu')
sc.render()
-def test_simple_volume_rendering():
- ds = fake_random_ds(32)
- im, sc = yt.volume_render(ds, sigma_clip=4.0)
+ def test_multiple_fields(self):
+ im, sc = yt.volume_render(self.ds)
+
+ volume_source = sc.get_source(0)
+ volume_source.set_fields([('gas', 'velocity_x'), ('gas', 'density')])
+ sc.render()
+
+ def test_rotation_volume_rendering(self):
+ im, sc = yt.volume_render(self.ds)
+
+ angle = 2 * np.pi
+ frames = 10
+ for i in range(frames):
+ sc.camera.yaw(angle / frames)
+ sc.render()
+
+ def test_simple_volume_rendering(self):
+ im, sc = yt.volume_render(self.ds, sigma_clip=4.0)
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/visualization/volume_rendering/tests/test_vr_cameras.py
--- a/yt/visualization/volume_rendering/tests/test_vr_cameras.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_cameras.py
@@ -28,9 +28,6 @@
from yt.visualization.tests.test_plotwindow import assert_fname
from unittest import TestCase
-# This toggles using a temporary directory. Turn off to examine images.
-use_tmpdir = True
-
def setup():
"""Test specific setup."""
@@ -39,8 +36,11 @@
class CameraTest(TestCase):
+ # This toggles using a temporary directory. Turn off to examine images.
+ use_tmpdir = True
+
def setUp(self):
- if use_tmpdir:
+ if self.use_tmpdir:
self.curdir = os.getcwd()
# Perform I/O in safe place instead of yt main dir
self.tmpdir = tempfile.mkdtemp()
@@ -56,7 +56,7 @@
self.field = ("gas", "density")
def tearDown(self):
- if use_tmpdir:
+ if self.use_tmpdir:
os.chdir(self.curdir)
shutil.rmtree(self.tmpdir)
diff -r 2e816e9a3e6a08ccd9ec528ceaed034273013151 -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 yt/visualization/volume_rendering/tests/test_zbuff.py
--- a/yt/visualization/volume_rendering/tests/test_zbuff.py
+++ b/yt/visualization/volume_rendering/tests/test_zbuff.py
@@ -21,11 +21,6 @@
import numpy as np
from unittest import TestCase
-np.random.seed(0)
-
-# This toggles using a temporary directory. Turn off to examine images.
-use_tmpdir = True
-
def setup():
"""Test specific setup."""
@@ -34,8 +29,12 @@
class ZBufferTest(TestCase):
+ # This toggles using a temporary directory. Turn off to examine images.
+ use_tmpdir = True
+
def setUp(self):
- if use_tmpdir:
+ np.random.seed(0)
+ if self.use_tmpdir:
self.curdir = os.getcwd()
# Perform I/O in safe place instead of yt main dir
self.tmpdir = tempfile.mkdtemp()
@@ -44,7 +43,7 @@
self.curdir, self.tmpdir = None, None
def tearDown(self):
- if use_tmpdir:
+ if self.use_tmpdir:
os.chdir(self.curdir)
shutil.rmtree(self.tmpdir)
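
The pattern repeated across these test modules (a class-level toggle plus
setUp/tearDown that sandbox all file I/O in a temporary directory) can be
lifted out on its own. A minimal standalone sketch, with an illustrative
class name:

    import os
    import shutil
    import tempfile
    from unittest import TestCase

    class TmpdirTestCase(TestCase):
        # This toggles using a temporary directory. Turn off to examine images.
        use_tmpdir = True

        def setUp(self):
            if self.use_tmpdir:
                self.curdir = os.getcwd()
                # Perform I/O in a safe place instead of the yt main dir
                self.tmpdir = tempfile.mkdtemp()
                os.chdir(self.tmpdir)
            else:
                self.curdir, self.tmpdir = None, None

        def tearDown(self):
            if self.use_tmpdir:
                os.chdir(self.curdir)
                shutil.rmtree(self.tmpdir)

Making use_tmpdir a class attribute rather than a module-level global is
what lets a subclass (or a one-off debugging session) flip it without
editing the module.
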
https://bitbucket.org/yt_analysis/yt/commits/3b0251809970/
Changeset: 3b0251809970
Branch: yt
User: brittonsmith
Date: 2016-01-01 14:08:47+00:00
Summary: Add a missing import.
Affected #: 1 file
diff -r baf1cf061b743fb4fd45e94e0abfeab3b51d0bf5 -r 3b02518099703b19c8bcac6e94592308070f2ebd yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -13,7 +13,11 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import os.path
+import numpy as np
+import os
+
+from yt.frontends.gadget_fof.api import \
+ GadgetFOFDataset
from yt.testing import \
requires_file, \
assert_equal
@@ -21,7 +25,6 @@
FieldValuesTest, \
requires_ds, \
data_dir_load
-from yt.frontends.gadget_fof.api import GadgetFOFDataset
_fields = ("particle_position_x", "particle_position_y",
"particle_position_z", "particle_velocity_x",
https://bitbucket.org/yt_analysis/yt/commits/1dadbf228b96/
Changeset: 1dadbf228b96
Branch: yt
User: brittonsmith
Date: 2016-01-13 10:28:57+00:00
Summary: PR comment fixes.
Affected #: 2 files
diff -r 3b02518099703b19c8bcac6e94592308070f2ebd -r 1dadbf228b96dcbe33f9a644699d201745f56152 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -44,7 +44,9 @@
super(GadgetFOFParticleIndex, self).__init__(ds, dataset_type)
def _calculate_particle_count(self):
- "Calculate the total number of each type of particle."
+ """
+ Calculate the total number of each type of particle.
+ """
self.particle_count = \
dict([(ptype, sum([d.total_particles[ptype] for d in self.data_files]))
for ptype in self.ds.particle_types_raw])
diff -r 3b02518099703b19c8bcac6e94592308070f2ebd -r 1dadbf228b96dcbe33f9a644699d201745f56152 yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -58,7 +58,7 @@
# fof/subhalo catalog with member particles
g298 = "gadget_halos/data/groups_298/fof_subhalo_tab_298.0.hdf5"
-@requires_ds(g298)
+@requires_file(g298)
def test_subhalos():
ds = data_dir_load(g298)
total_sub = 0
https://bitbucket.org/yt_analysis/yt/commits/f6f23bcf0202/
Changeset: f6f23bcf0202
Branch: yt
User: brittonsmith
Date: 2016-01-13 10:31:24+00:00
Summary: Removing some tests we didn't need.
Affected #: 1 file
diff -r 1dadbf228b96dcbe33f9a644699d201745f56152 -r f6f23bcf0202942ee29491425c48fc89fe38cb23 yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -39,7 +39,6 @@
@requires_ds(g5)
def test_fields_g5():
ds = data_dir_load(g5)
- yield assert_equal, str(ds), os.path.basename(g5)
for field in _fields:
yield FieldValuesTest(g5, field, particle_type=True)
@@ -47,7 +46,6 @@
@requires_ds(g42)
def test_fields_g42():
ds = data_dir_load(g42)
- yield assert_equal, str(ds), os.path.basename(g42)
for field in _fields:
yield FieldValuesTest(g42, field, particle_type=True)
https://bitbucket.org/yt_analysis/yt/commits/3a7d34241079/
Changeset: 3a7d34241079
Branch: yt
User: brittonsmith
Date: 2016-01-13 10:48:13+00:00
Summary: Adding another halo container test.
Affected #: 1 file
diff -r f6f23bcf0202942ee29491425c48fc89fe38cb23 -r 3a7d34241079e1ad482521748253052e83b43d9a yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -20,7 +20,8 @@
GadgetFOFDataset
from yt.testing import \
requires_file, \
- assert_equal
+ assert_equal, \
+ assert_array_equal
from yt.utilities.answer_testing.framework import \
FieldValuesTest, \
requires_ds, \
@@ -72,3 +73,19 @@
# Test that all subhalo particles are contained within
# their parent group.
yield assert_equal, total_sub, total_int
+
+@requires_file(g298)
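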
+def test_halomasses():
+ ds = data_dir_load(g298)
+ ad = ds.all_data()
+ for ptype in ["Group", "Subhalo"]:
+ nhalos = ds.index.particle_count[ptype]
+ mass = ds.arr(np.zeros(nhalos), "code_mass")
+ for i in range(nhalos):
+ halo = ds.halo(ptype, i)
+ mass[i] = halo.mass
+
+ # Check that masses from halo containers are the same
+ # as the array of all masses. This will test getting
+ # scalar fields for halos correctly.
+ yield assert_array_equal, ad[ptype, "particle_mass"], mass
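
For reference, the halo-container access pattern this test exercises looks
like the following in interactive use (a sketch; it assumes the g298 catalog
above is available locally):

    import yt

    ds = yt.load("gadget_halos/data/groups_298/fof_subhalo_tab_298.0.hdf5")

    # Scalar fields are available directly on the halo container...
    halo = ds.halo("Group", 0)
    print(halo.mass)

    # ...and should match the corresponding entry in the catalog arrays.
    ad = ds.all_data()
    print(ad["Group", "particle_mass"][0])
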
https://bitbucket.org/yt_analysis/yt/commits/867a945096e6/
Changeset: 867a945096e6
Branch: yt
User: brittonsmith
Date: 2016-01-14 12:09:46+00:00
Summary: Removing unnecessary variables and import.
Affected #: 1 file
diff -r 3a7d34241079e1ad482521748253052e83b43d9a -r 867a945096e6b5e18a3c47d982a9ffb400676e18 yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -14,7 +14,6 @@
#-----------------------------------------------------------------------------
import numpy as np
-import os
from yt.frontends.gadget_fof.api import \
GadgetFOFDataset
@@ -39,14 +38,12 @@
@requires_ds(g5)
def test_fields_g5():
- ds = data_dir_load(g5)
for field in _fields:
yield FieldValuesTest(g5, field, particle_type=True)
@requires_ds(g42)
def test_fields_g42():
- ds = data_dir_load(g42)
for field in _fields:
yield FieldValuesTest(g42, field, particle_type=True)
https://bitbucket.org/yt_analysis/yt/commits/66b20a398d40/
Changeset: 66b20a398d40
Branch: yt
User: brittonsmith
Date: 2016-01-14 12:17:18+00:00
Summary: Looks nicer.
Affected #: 1 file
diff -r 867a945096e6b5e18a3c47d982a9ffb400676e18 -r 66b20a398d40470eb5fd9b1eeec579bba9e9100c yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -72,7 +72,7 @@
yield assert_equal, total_sub, total_int
@requires_file(g298)
-def test_halomasses():
+def test_halo_masses():
ds = data_dir_load(g298)
ad = ds.all_data()
for ptype in ["Group", "Subhalo"]:
https://bitbucket.org/yt_analysis/yt/commits/c90c0890a889/
Changeset: c90c0890a889
Branch: yt
User: brittonsmith
Date: 2016-01-14 12:24:57+00:00
Summary: Only check for id_offset if subhalo is not the first subhalo.
Affected #: 1 file
diff -r 66b20a398d40470eb5fd9b1eeec579bba9e9100c -r c90c0890a88929df12dd8c0eeb9ab4b757b9fc90 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -590,12 +590,15 @@
parent_subhalos, self.group_identifier))
# ids of the sibling subhalos that come before this one
- sub_ids = np.arange(
- self.particle_identifier - self.subgroup_identifier,
- self.particle_identifier)
- my_data = self.index._get_halo_values(
- "Subhalo", sub_ids, ["SubhaloLen"])
- id_offset = my_data["SubhaloLen"].sum(dtype=np.int64)
+ if self.subgroup_identifier > 0:
+ sub_ids = np.arange(
+ self.particle_identifier - self.subgroup_identifier,
+ self.particle_identifier)
+ my_data = self.index._get_halo_values(
+ "Subhalo", sub_ids, ["SubhaloLen"])
+ id_offset = my_data["SubhaloLen"].sum(dtype=np.int64)
+ else:
+ id_offset = 0
# Calculate the starting index for the member particles.
# First, add up all the particles in the earlier files.
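
The arithmetic being guarded is straightforward: a subhalo's member
particles start after the members of its earlier siblings within the same
group. A plain-NumPy sketch of the computation (the SubhaloLen values stand
in for what index._get_halo_values would return; the numbers are made up):

    import numpy as np

    particle_identifier = 7   # global index in the Subhalo list
    subgroup_identifier = 2   # index within the parent group

    if subgroup_identifier > 0:
        # ids of the sibling subhalos that come before this one
        sub_ids = np.arange(particle_identifier - subgroup_identifier,
                            particle_identifier)           # -> [5, 6]
        sibling_len = np.array([120, 45], dtype=np.int64)  # their SubhaloLen
        id_offset = sibling_len.sum(dtype=np.int64)        # -> 165
    else:
        # First subhalo in the group: no siblings, and no empty-id lookup.
        id_offset = 0
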
https://bitbucket.org/yt_analysis/yt/commits/e24ec9b6c005/
Changeset: e24ec9b6c005
Branch: yt
User: brittonsmith
Date: 2016-01-25 13:24:12+00:00
Summary: Make sure halos_ds inherits all necessary attributes and make add_field add to both field lists.
Affected #: 1 file
diff -r c90c0890a88929df12dd8c0eeb9ab4b757b9fc90 -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -150,6 +150,10 @@
super(GadgetFOFDataset, self).__init__(filename, dataset_type,
units_override=units_override)
+ def add_field(self, *args, **kwargs):
+ super(GadgetFOFDataset, self).add_field(*args, **kwargs)
+ self._halos_ds.add_field(*args, **kwargs)
+
@property
def halos_field_list(self):
return self._halos_ds.field_list
@@ -424,7 +428,12 @@
pass
def _parse_parameter_file(self):
- for attr in ["dimensionality", "current_time",
+ for attr in ["cosmological_simulation", "cosmology",
+ "current_redshift", "current_time",
+ "dimensionality", "domain_dimensions",
+ "domain_left_edge", "domain_right_edge",
+ "domain_width", "hubble_constant",
+ "omega_lambda", "omega_matter",
"unique_identifier"]:
setattr(self, attr, getattr(self.real_ds, attr))
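
The inheritance is a plain setattr loop over the real dataset; stripped of
the yt machinery, the hand-off looks like this (class names here are
illustrative, not from the changeset):

    class RealDS:
        dimensionality = 3
        current_redshift = 0.5
        hubble_constant = 0.7

    class HaloDS:
        # Mirror selected attributes from the parent dataset at init time.
        _inherited = ["dimensionality", "current_redshift", "hubble_constant"]

        def __init__(self, real_ds):
            self.real_ds = real_ds
            for attr in self._inherited:
                setattr(self, attr, getattr(real_ds, attr))

    print(HaloDS(RealDS()).hubble_constant)  # 0.7
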
https://bitbucket.org/yt_analysis/yt/commits/14436cdc7b46/
Changeset: 14436cdc7b46
Branch: yt
User: brittonsmith
Date: 2016-01-26 15:22:04+00:00
Summary: Merging.
Affected #: 95 files
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -112,5 +112,5 @@
.navbar-form.navbar-right:last-child {
- margin-right: -20px;
+ margin-right: -60px;
}
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 doc/source/help/index.rst
--- a/doc/source/help/index.rst
+++ b/doc/source/help/index.rst
@@ -114,7 +114,7 @@
If you have not done so already (see :ref:`source-installation`), clone a copy of the yt mercurial repository and make it the 'active' installation by doing
-.. code-block::bash
+.. code-block:: bash
python setup.py develop
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -1452,7 +1452,9 @@
.. note::
PyX must be installed, which can be accomplished either manually
with ``pip install pyx`` or with the install script by setting
- ``INST_PYX=1``.
+ ``INST_PYX=1``. If you are using python2, you must install pyx
+ version 0.12.1 with ``pip install pyx==0.12.1``, since that is
+ the last version with python2 support.
This module can take any of the plots mentioned above and create an
EPS or PDF figure. For example,
@@ -1499,3 +1501,31 @@
margin, but it can be overridden by providing the keyword
``cb_location`` with a dict of either ``right, left, top, bottom``
with the fields as the keys.
+
+You can also combine slices, projections, and phase plots. Here is
+an example that includes slices and phase plots:
+
+.. code-block:: python
+
+ import yt
+ from yt import SlicePlot, PhasePlot
+ from yt.visualization.eps_writer import multiplot_yt
+
+ ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+ p1 = SlicePlot(ds, 0, 'density')
+ p1.set_width(10, 'kpc')
+
+ p2 = SlicePlot(ds, 0, 'temperature')
+ p2.set_width(10, 'kpc')
+ p2.set_cmap('temperature', 'hot')
+
+ sph = ds.sphere(ds.domain_center, (10, 'kpc'))
+ p3 = PhasePlot(sph, 'radius', 'density', 'temperature',
+ weight_field='cell_mass')
+
+ p4 = PhasePlot(sph, 'radius', 'density', 'pressure', 'cell_mass')
+
+ mp = multiplot_yt(2, 2, [p1, p2, p3, p4], savefig="yt", shrink_cb=0.9,
+ bare_axes=False, yt_nocbar=False, margins=(0.5,0.5))
+
+ mp.save_fig('multi_slice_phase')
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/analysis_modules/halo_finding/fof/EnzoFOF.c
--- a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
+++ b/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
@@ -48,7 +48,7 @@
xpos = (PyArrayObject *) PyArray_FromAny(oxpos,
PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
- NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+ NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
if(!xpos){
PyErr_Format(_FOFerror,
"EnzoFOF: xpos didn't work.");
@@ -58,7 +58,7 @@
ypos = (PyArrayObject *) PyArray_FromAny(oypos,
PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
- NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+ NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
if((!ypos)||(PyArray_SIZE(ypos) != num_particles)) {
PyErr_Format(_FOFerror,
"EnzoFOF: xpos and ypos must be the same length.");
@@ -67,7 +67,7 @@
zpos = (PyArrayObject *) PyArray_FromAny(ozpos,
PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
- NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+ NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
if((!zpos)||(PyArray_SIZE(zpos) != num_particles)) {
PyErr_Format(_FOFerror,
"EnzoFOF: xpos and zpos must be the same length.");
@@ -140,7 +140,8 @@
kdFinishFoF(kd);
- PyArray_UpdateFlags(particle_group_id, NPY_OWNDATA | particle_group_id->flags);
+ PyArray_UpdateFlags(particle_group_id,
+ NPY_ARRAY_OWNDATA | PyArray_FLAGS(particle_group_id));
PyObject *return_value = Py_BuildValue("N", particle_group_id);
Py_DECREF(xpos);
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/analysis_modules/halo_finding/hop/EnzoHop.c
--- a/yt/analysis_modules/halo_finding/hop/EnzoHop.c
+++ b/yt/analysis_modules/halo_finding/hop/EnzoHop.c
@@ -42,7 +42,7 @@
*xpos = (PyArrayObject *) PyArray_FromAny(oxpos,
PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
- NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+ NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
if(!*xpos){
PyErr_Format(_HOPerror,
"EnzoHop: xpos didn't work.");
@@ -52,7 +52,7 @@
*ypos = (PyArrayObject *) PyArray_FromAny(oypos,
PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
- NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+ NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
if((!*ypos)||(PyArray_SIZE(*ypos) != num_particles)) {
PyErr_Format(_HOPerror,
"EnzoHop: xpos and ypos must be the same length.");
@@ -61,7 +61,7 @@
*zpos = (PyArrayObject *) PyArray_FromAny(ozpos,
PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
- NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+ NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
if((!*zpos)||(PyArray_SIZE(*zpos) != num_particles)) {
PyErr_Format(_HOPerror,
"EnzoHop: xpos and zpos must be the same length.");
@@ -70,7 +70,7 @@
*mass = (PyArrayObject *) PyArray_FromAny(omass,
PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
- NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+ NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
if((!*mass)||(PyArray_SIZE(*mass) != num_particles)) {
PyErr_Format(_HOPerror,
"EnzoHop: xpos and mass must be the same length.");
@@ -129,11 +129,11 @@
PyArray_DescrFromType(NPY_FLOAT64));
fprintf(stdout, "Copying arrays for %d particles\n", num_particles);
- kd->np_masses = (npy_float64*) mass->data;
- kd->np_pos[0] = (npy_float64*) xpos->data;
- kd->np_pos[1] = (npy_float64*) ypos->data;
- kd->np_pos[2] = (npy_float64*) zpos->data;
- kd->np_densities = (npy_float64*) particle_density->data;
+ kd->np_masses = (npy_float64*) PyArray_DATA(mass);
+ kd->np_pos[0] = (npy_float64*) PyArray_DATA(xpos);
+ kd->np_pos[1] = (npy_float64*) PyArray_DATA(ypos);
+ kd->np_pos[2] = (npy_float64*) PyArray_DATA(zpos);
+ kd->np_densities = (npy_float64*) PyArray_DATA(particle_density);
kd->totalmass = totalmass;
for (i = 0; i < num_particles; i++) kd->p[i].np_index = i;
@@ -173,8 +173,8 @@
free(my_comm.gl);
free_slice(my_comm.s);
- PyArray_UpdateFlags(particle_density, NPY_OWNDATA | particle_density->flags);
- PyArray_UpdateFlags(particle_group_id, NPY_OWNDATA | particle_group_id->flags);
+ PyArray_UpdateFlags(particle_density, NPY_ARRAY_OWNDATA | PyArray_FLAGS(particle_density));
+ PyArray_UpdateFlags(particle_group_id, NPY_ARRAY_OWNDATA | PyArray_FLAGS(particle_group_id));
PyObject *return_value = Py_BuildValue("NN", particle_density, particle_group_id);
Py_DECREF(xpos);
@@ -266,11 +266,11 @@
totalmass /= normalize_to;
- self->kd->np_masses = (npy_float64 *)self->mass->data;
- self->kd->np_pos[0] = (npy_float64 *)self->xpos->data;
- self->kd->np_pos[1] = (npy_float64 *)self->ypos->data;
- self->kd->np_pos[2] = (npy_float64 *)self->zpos->data;
- self->kd->np_densities = (npy_float64 *)self->densities->data;
+ self->kd->np_masses = (npy_float64 *)PyArray_DATA(self->mass);
+ self->kd->np_pos[0] = (npy_float64 *)PyArray_DATA(self->xpos);
+ self->kd->np_pos[1] = (npy_float64 *)PyArray_DATA(self->ypos);
+ self->kd->np_pos[2] = (npy_float64 *)PyArray_DATA(self->zpos);
+ self->kd->np_densities = (npy_float64 *)PyArray_DATA(self->densities);
self->kd->totalmass = totalmass;
PrepareKD(self->kd);
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -18,7 +18,7 @@
from collections import defaultdict
from yt.funcs import mylog, get_pbar
-from yt.utilities.lib.ContourFinding import \
+from yt.utilities.lib.contour_finding import \
ContourTree, TileContourTree, link_node_contours, \
update_joins
from yt.utilities.lib.grid_traversal import \
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -13,7 +13,7 @@
from yt.data_objects.data_containers import YTFieldData
from yt.data_objects.time_series import DatasetSeries
-from yt.utilities.lib.CICDeposit import CICSample_3
+from yt.utilities.lib.particle_mesh_operations import CICSample_3
from yt.utilities.parallel_tools.parallel_analysis_interface import \
parallel_root_only
from yt.funcs import mylog, get_pbar
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -98,7 +98,8 @@
key of the unit: (width, 'unit'). If set to a float, code units
are assumed. Only for off-axis cubes.
depth_res : integer, optional
- The resolution of integration along the line of sight for off-axis cubes. Default: 256
+ Deprecated; kept in the function signature only for API
+ compatibility.
method : string, optional
Set the projection method to be used.
"integrate" : line of sight integration over the line element.
@@ -189,7 +190,7 @@
buf = prj.to_frb(width, self.nx, center=self.center)["intensity"]
else:
buf, sc = off_axis_projection(ds, self.center, normal, width,
- (self.nx, self.ny, depth_res), "intensity",
+ (self.nx, self.ny), "intensity",
north_vector=north_vector, no_ghost=no_ghost,
method=method, weight=weight_field)
sto.result_id = i
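
depth_res is retired here by documentation alone; a common, slightly
stricter variant (not what this changeset does) is to keep the parameter
and warn when a caller still passes it, as in this hypothetical sketch:

    import warnings

    def make_cube(nx, ny, depth_res=None):
        """depth_res is deprecated and ignored; kept for API compatibility."""
        if depth_res is not None:
            warnings.warn("depth_res is deprecated and has no effect",
                          DeprecationWarning, stacklevel=2)
        return (nx, ny)  # the resolution no longer carries a depth element
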
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -171,7 +171,7 @@
domains[(dle,dre)] = [halo,]
#for niceness, let's process the domains in order of
#the one with the most halos
- domains_list = [(len(v),k,v) for k,v in domains.iteritems()]
+ domains_list = [(len(v),k,v) for k,v in domains.items()]
domains_list.sort()
domains_list.reverse() #we want the most populated domains first
return domains_list
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -19,7 +19,7 @@
#-----------------------------------------------------------------------------
from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
-from yt.funcs import fix_axis, mylog, get_pbar
+from yt.funcs import fix_axis, get_pbar
from yt.visualization.volume_rendering.off_axis_projection import \
off_axis_projection
from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -236,7 +236,8 @@
nx : integer, optional
The dimensions on a side of the projection image.
nz : integer, optional
- The number of elements along the integration path length.
+ Deprecated; kept in the function signature only for API
+ compatibility.
north_vector : a sequence of floats
A vector defining the 'up' direction in the plot. This
option sets the orientation of the slicing plane. If not
@@ -250,8 +251,8 @@
less notable when the transfer function is smooth and
broad. Default: True
source : yt.data_objects.data_containers.YTSelectionContainer, optional
- If specified, this will be the data source used for selecting regions to project.
- Currently unsupported in yt 2.x.
+ If specified, this will be the data source used for selecting regions
+ to project.
Examples
--------
@@ -261,32 +262,31 @@
wd = self.ds.coordinates.sanitize_width(L, width, depth)
w = tuple(el.in_units('code_length').v for el in wd)
ctr, dctr = self.ds.coordinates.sanitize_center(center, L)
- res = (nx, nx, nz)
+ res = (nx, nx)
- if source is not None:
- mylog.error("Source argument is not currently supported for off-axis S-Z projections.")
- raise NotImplementedError
+ if source is None:
+ source = self.ds
beta_par = generate_beta_par(L)
self.ds.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
setup_sunyaev_zeldovich_fields(self.ds)
- dens = off_axis_projection(self.ds, ctr, L, w, res, "density",
+ dens = off_axis_projection(source, ctr, L, w, res, "density",
north_vector=north_vector, no_ghost=no_ghost)
- Te = off_axis_projection(self.ds, ctr, L, w, res, "t_sz",
+ Te = off_axis_projection(source, ctr, L, w, res, "t_sz",
north_vector=north_vector, no_ghost=no_ghost)/dens
- bpar = off_axis_projection(self.ds, ctr, L, w, res, "beta_par",
+ bpar = off_axis_projection(source, ctr, L, w, res, "beta_par",
north_vector=north_vector, no_ghost=no_ghost)/dens
- omega1 = off_axis_projection(self.ds, ctr, L, w, res, "t_squared",
+ omega1 = off_axis_projection(source, ctr, L, w, res, "t_squared",
north_vector=north_vector, no_ghost=no_ghost)/dens
omega1 = omega1/(Te*Te) - 1.
if self.high_order:
- bperp2 = off_axis_projection(self.ds, ctr, L, w, res, "beta_perp_squared",
+ bperp2 = off_axis_projection(source, ctr, L, w, res, "beta_perp_squared",
north_vector=north_vector, no_ghost=no_ghost)/dens
- sigma1 = off_axis_projection(self.ds, ctr, L, w, res, "t_beta_par",
+ sigma1 = off_axis_projection(source, ctr, L, w, res, "t_beta_par",
north_vector=north_vector, no_ghost=no_ghost)/dens
sigma1 = sigma1/Te - bpar
- kappa1 = off_axis_projection(self.ds, ctr, L, w, res, "beta_par_squared",
+ kappa1 = off_axis_projection(source, ctr, L, w, res, "beta_par_squared",
north_vector=north_vector, no_ghost=no_ghost)/dens
kappa1 -= bpar
else:
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -19,7 +19,7 @@
import fileinput
import io
from re import finditer
-from tempfile import TemporaryFile
+from tempfile import NamedTemporaryFile, TemporaryFile
import os
import sys
import zipfile
@@ -40,9 +40,9 @@
YTParticleDepositionNotImplemented, \
YTNoAPIKey, \
YTTooManyVertices
-from yt.utilities.lib.QuadTree import \
+from yt.utilities.lib.quad_tree import \
QuadTree
-from yt.utilities.lib.Interpolators import \
+from yt.utilities.lib.interpolators import \
ghost_zone_interpolate
from yt.utilities.lib.misc_utilities import \
fill_region, fill_region_float
@@ -58,7 +58,7 @@
from yt.fields.field_exceptions import \
NeedsOriginalGrid
from yt.frontends.stream.api import load_uniform_grid
-
+import yt.extern.six as six
class YTStreamline(YTSelectionContainer1D):
"""
@@ -810,7 +810,7 @@
int(any(self.ds.periodicity)))
fi = self.ds._get_field_info(field)
self[field] = self.ds.arr(dest, fi.units)
-
+
class LevelState(object):
current_dx = None
@@ -1628,7 +1628,7 @@
@parallel_root_only
def _export_ply(self, filename, bounds = None, color_field = None,
color_map = "algae", color_log = True, sample_type = "face"):
- if isinstance(filename, io.IOBase):
+ if hasattr(filename, 'read'):
f = filename
else:
f = open(filename, "wb")
@@ -1641,27 +1641,29 @@
("red", "uint8"), ("green", "uint8"), ("blue", "uint8") ]
fs = [("ni", "uint8"), ("v1", "<i4"), ("v2", "<i4"), ("v3", "<i4"),
("red", "uint8"), ("green", "uint8"), ("blue", "uint8") ]
- f.write("ply\n")
- f.write("format binary_little_endian 1.0\n")
- f.write("element vertex %s\n" % (nv))
- f.write("property float x\n")
- f.write("property float y\n")
- f.write("property float z\n")
+ f.write(b"ply\n")
+ f.write(b"format binary_little_endian 1.0\n")
+ line = "element vertex %i\n" % (nv)
+ f.write(six.b(line))
+ f.write(b"property float x\n")
+ f.write(b"property float y\n")
+ f.write(b"property float z\n")
if color_field is not None and sample_type == "vertex":
- f.write("property uchar red\n")
- f.write("property uchar green\n")
- f.write("property uchar blue\n")
+ f.write(b"property uchar red\n")
+ f.write(b"property uchar green\n")
+ f.write(b"property uchar blue\n")
v = np.empty(self.vertices.shape[1], dtype=vs)
cs = self.vertex_samples[color_field]
self._color_samples(cs, color_log, color_map, v)
else:
v = np.empty(self.vertices.shape[1], dtype=vs[:3])
- f.write("element face %s\n" % (nv/3))
- f.write("property list uchar int vertex_indices\n")
+ line = "element face %i\n" % (nv / 3)
+ f.write(six.b(line))
+ f.write(b"property list uchar int vertex_indices\n")
if color_field is not None and sample_type == "face":
- f.write("property uchar red\n")
- f.write("property uchar green\n")
- f.write("property uchar blue\n")
+ f.write(b"property uchar red\n")
+ f.write(b"property uchar green\n")
+ f.write(b"property uchar blue\n")
# Now we get our samples
cs = self[color_field]
arr = np.empty(cs.shape[0], dtype=np.dtype(fs))
@@ -1676,7 +1678,7 @@
np.divide(tmp, w, tmp)
np.subtract(tmp, 0.5, tmp) # Center at origin.
v[ax][:] = tmp
- f.write("end_header\n")
+ f.write(b"end_header\n")
v.tofile(f)
arr["ni"][:] = 3
vi = np.arange(nv, dtype="<i")
@@ -1765,39 +1767,49 @@
open(fn, "wb").write(ply_file.read())
raise YTTooManyVertices(self.vertices.shape[1], fn)
- zfs = TemporaryFile()
+ zfs = NamedTemporaryFile(suffix='.zip')
with zipfile.ZipFile(zfs, "w", zipfile.ZIP_DEFLATED) as zf:
zf.writestr("yt_export.ply", ply_file.read())
zfs.seek(0)
zfs.seek(0)
data = {
- 'title': title,
'token': api_key,
+ 'name': title,
'description': description,
- 'fileModel': zfs,
- 'filenameModel': "yt_export.zip",
+ 'tags': "yt",
}
- upload_id = self._upload_to_sketchfab(data)
+ files = {
+ 'modelFile': zfs
+ }
+ upload_id = self._upload_to_sketchfab(data, files)
upload_id = self.comm.mpi_bcast(upload_id, root = 0)
return upload_id
@parallel_root_only
- def _upload_to_sketchfab(self, data):
- import json
- from yt.extern.six.moves import urllib
- from yt.utilities.poster.encode import multipart_encode
- from yt.utilities.poster.streaminghttp import register_openers
- register_openers()
- datamulti, headers = multipart_encode(data)
- request = urllib.request.Request("https://api.sketchfab.com/v1/models",
- datamulti, headers)
- rv = urllib.request.urlopen(request).read()
- rv = json.loads(rv)
- upload_id = rv.get("result", {}).get("id", None)
- if upload_id:
- mylog.info("Model uploaded to: https://sketchfab.com/show/%s",
- upload_id)
+ def _upload_to_sketchfab(self, data, files):
+ import requests
+ SKETCHFAB_DOMAIN = 'sketchfab.com'
+ SKETCHFAB_API_URL = 'https://api.{}/v2/models'.format(SKETCHFAB_DOMAIN)
+ SKETCHFAB_MODEL_URL = 'https://{}/models/'.format(SKETCHFAB_DOMAIN)
+
+ try:
+ r = requests.post(SKETCHFAB_API_URL, data=data, files=files, verify=False)
+ except requests.exceptions.RequestException as e:
+ mylog.error("An error occured: {}".format(e))
+ return
+
+ result = r.json()
+
+ if r.status_code != requests.codes.created:
+ mylog.error("Upload to SketchFab failed with error: {}".format(result))
+ return
+
+ model_uid = result['uid']
+ model_url = SKETCHFAB_MODEL_URL + model_uid
+ if model_uid:
+ mylog.info("Model uploaded to: {}".format(model_url))
else:
mylog.error("Problem uploading.")
- return upload_id
+
+ return model_uid
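
The rewritten upload path is a standard requests multipart POST; the same
call shape in isolation (the token, metadata, and zip file are placeholders;
a real Sketchfab API token and an existing file are required):

    import requests

    SKETCHFAB_API_URL = "https://api.sketchfab.com/v2/models"

    data = {
        "token": "YOUR_API_TOKEN",        # placeholder
        "name": "yt_export",
        "description": "isosurface exported from yt",
        "tags": "yt",
    }
    with open("yt_export.zip", "rb") as zfs:  # assumes the zip exists
        files = {"modelFile": zfs}
        r = requests.post(SKETCHFAB_API_URL, data=data, files=files)

    result = r.json()
    if r.status_code == requests.codes.created:
        print("Model uploaded to: https://sketchfab.com/models/" + result["uid"])
    else:
        print("Upload failed:", result)

Note that the changeset additionally passes verify=False to skip certificate
verification; the sketch omits that.
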
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -24,7 +24,7 @@
from yt.utilities.exceptions import \
YTFieldTypeNotFound, \
YTParticleDepositionNotImplemented
-from yt.utilities.lib.Interpolators import \
+from yt.utilities.lib.interpolators import \
ghost_zone_interpolate
class AMRGridPatch(YTSelectionContainer):
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -32,7 +32,7 @@
new_bin_profile3d
from yt.utilities.parallel_tools.parallel_analysis_interface import \
ParallelAnalysisInterface, parallel_objects
-from yt.utilities.lib.CICDeposit import \
+from yt.utilities.lib.particle_mesh_operations import \
CICDeposit_2, \
NGPDeposit_2
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -293,6 +293,22 @@
def _is_valid(cls, *args, **kwargs):
return False
+ @classmethod
+ def _guess_candidates(cls, base, directories, files):
+ """
+ This is a class method that accepts a directory (base), a list of files
+ in that directory, and a list of subdirectories. It should return a
+ list of filenames (defined relative to the supplied directory) and a
+ boolean as to whether or not further directories should be recursed.
+
+ This function doesn't need to catch all possibilities, nor does it need
+ to filter possibilities.
+ """
+ return [], True
+
+ def close(self):
+ pass
+
def __getitem__(self, key):
""" Returns units, parameters, or conversion_factors in that order. """
return self.parameters[key]
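
The new hook gives frontends a cheap way to prune load-time guessing; a
hypothetical override (the class and suffix are illustrative, not from the
changeset):

    class MyDataset:  # stands in for a yt Dataset subclass
        @classmethod
        def _guess_candidates(cls, base, directories, files):
            # Claim every HDF5 file in this directory and stop recursing.
            return [f for f in files if f.endswith(".hdf5")], False

    print(MyDataset._guess_candidates(
        "/data", [], ["snap_000.hdf5", "README.txt"]))
    # -> (['snap_000.hdf5'], False)
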
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/extern/progressbar.py
--- a/yt/extern/progressbar.py
+++ /dev/null
@@ -1,370 +0,0 @@
-#!/usr/bin/python
-# -*- coding: iso-8859-1 -*-
-#
-# progressbar - Text progressbar library for python.
-# Copyright (c) 2005 Nilton Volpato
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-
-"""Text progressbar library for python.
-
-This library provides a text mode progressbar. This is tipically used
-to display the progress of a long running operation, providing a
-visual clue that processing is underway.
-
-The ProgressBar class manages the progress, and the format of the line
-is given by a number of widgets. A widget is an object that may
-display diferently depending on the state of the progress. There are
-three types of widget:
-- a string, which always shows itself;
-- a ProgressBarWidget, which may return a diferent value every time
-it's update method is called; and
-- a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
-expands to fill the remaining width of the line.
-
-The progressbar module is very easy to use, yet very powerful. And
-automatically supports features like auto-resizing when available.
-"""
-from __future__ import print_function
-
-__author__ = "Nilton Volpato"
-__author_email__ = "first-name dot last-name @ gmail.com"
-__date__ = "2006-05-07"
-__version__ = "2.2"
-
-# Changelog
-#
-# 2006-05-07: v2.2 fixed bug in windows
-# 2005-12-04: v2.1 autodetect terminal width, added start method
-# 2005-12-04: v2.0 everything is now a widget (wow!)
-# 2005-12-03: v1.0 rewrite using widgets
-# 2005-06-02: v0.5 rewrite
-# 2004-??-??: v0.1 first version
-
-
-from yt.extern.six import string_types
-import sys, time
-from array import array
-try:
- from fcntl import ioctl
- import termios
-except ImportError:
- pass
-import signal
-
-class ProgressBarWidget(object):
- """This is an element of ProgressBar formatting.
-
- The ProgressBar object will call it's update value when an update
- is needed. It's size may change between call, but the results will
- not be good if the size changes drastically and repeatedly.
- """
- def update(self, pbar):
- """Returns the string representing the widget.
-
- The parameter pbar is a reference to the calling ProgressBar,
- where one can access attributes of the class for knowing how
- the update must be made.
-
- At least this function must be overriden."""
- pass
-
-class ProgressBarWidgetHFill(object):
- """This is a variable width element of ProgressBar formatting.
-
- The ProgressBar object will call it's update value, informing the
- width this object must the made. This is like TeX \\hfill, it will
- expand to fill the line. You can use more than one in the same
- line, and they will all have the same width, and together will
- fill the line.
- """
- def update(self, pbar, width):
- """Returns the string representing the widget.
-
- The parameter pbar is a reference to the calling ProgressBar,
- where one can access attributes of the class for knowing how
- the update must be made. The parameter width is the total
- horizontal width the widget must have.
-
- At least this function must be overriden."""
- pass
-
-
-class ETA(ProgressBarWidget):
- "Widget for the Estimated Time of Arrival"
- def format_time(self, seconds):
- return time.strftime('%H:%M:%S', time.gmtime(seconds))
- def update(self, pbar):
- if pbar.currval == 0:
- return 'ETA: --:--:--'
- elif pbar.finished:
- return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
- else:
- elapsed = pbar.seconds_elapsed
- eta = elapsed * pbar.maxval / pbar.currval - elapsed
- return 'ETA: %s' % self.format_time(eta)
-
-class FileTransferSpeed(ProgressBarWidget):
- "Widget for showing the transfer speed (useful for file transfers)."
- def __init__(self):
- self.fmt = '%6.2f %s'
- self.units = ['B','K','M','G','T','P']
- def update(self, pbar):
- if pbar.seconds_elapsed < 2e-6:#== 0:
- bps = 0.0
- else:
- bps = float(pbar.currval) / pbar.seconds_elapsed
- spd = bps
- for u in self.units:
- if spd < 1000:
- break
- spd /= 1000
- return self.fmt % (spd, u+'/s')
-
-class RotatingMarker(ProgressBarWidget):
- "A rotating marker for filling the bar of progress."
- def __init__(self, markers='|/-\\'):
- self.markers = markers
- self.curmark = -1
- def update(self, pbar):
- if pbar.finished:
- return self.markers[0]
- self.curmark = (self.curmark + 1)%len(self.markers)
- return self.markers[self.curmark]
-
-class Percentage(ProgressBarWidget):
- "Just the percentage done."
- def update(self, pbar):
- return '%3d%%' % pbar.percentage()
-
-class Bar(ProgressBarWidgetHFill):
- "The bar of progress. It will strech to fill the line."
- def __init__(self, marker='#', left='|', right='|'):
- self.marker = marker
- self.left = left
- self.right = right
- def _format_marker(self, pbar):
- if isinstance(self.marker, string_types):
- return self.marker
- else:
- return self.marker.update(pbar)
- def update(self, pbar, width):
- percent = pbar.percentage()
- cwidth = int(width - len(self.left) - len(self.right))
- marked_width = int(percent * cwidth / 100)
- m = self._format_marker(pbar)
- bar = (self.left + (m*marked_width).ljust(cwidth) + self.right)
- return bar
-
-class ReverseBar(Bar):
- "The reverse bar of progress, or bar of regress. :)"
- def update(self, pbar, width):
- percent = pbar.percentage()
- cwidth = width - len(self.left) - len(self.right)
- marked_width = int(percent * cwidth / 100)
- m = self._format_marker(pbar)
- bar = (self.left + (m*marked_width).rjust(cwidth) + self.right)
- return bar
-
-default_widgets = [Percentage(), ' ', Bar()]
-class ProgressBar(object):
- """This is the ProgressBar class, it updates and prints the bar.
-
- The term_width parameter may be an integer. Or None, in which case
- it will try to guess it, if it fails it will default to 80 columns.
-
- The simple use is like this:
- >>> pbar = ProgressBar().start()
- >>> for i in xrange(100):
- ... # do something
- ... pbar.update(i+1)
- ...
- >>> pbar.finish()
-
- But anything you want to do is possible (well, almost anything).
- You can supply different widgets of any type in any order. And you
- can even write your own widgets! There are many widgets already
- shipped and you should experiment with them.
-
- When implementing a widget update method you may access any
- attribute or function of the ProgressBar object calling the
- widget's update method. The most important attributes you would
- like to access are:
- - currval: current value of the progress, 0 <= currval <= maxval
- - maxval: maximum (and final) value of the progress
- - finished: True if the bar is have finished (reached 100%), False o/w
- - start_time: first time update() method of ProgressBar was called
- - seconds_elapsed: seconds elapsed since start_time
- - percentage(): percentage of the progress (this is a method)
- """
- def __init__(self, maxval=100, widgets=default_widgets, term_width=None,
- fd=sys.stderr):
- assert maxval > 0
- self.maxval = maxval
- self.widgets = widgets
- self.fd = fd
- self.signal_set = False
- if term_width is None:
- try:
- self.handle_resize(None,None)
- signal.signal(signal.SIGWINCH, self.handle_resize)
- self.signal_set = True
- except:
- self.term_width = 79
- else:
- self.term_width = term_width
-
- self.currval = 0
- self.finished = False
- self.prev_percentage = -1
- self.start_time = None
- self.seconds_elapsed = 0
-
- def handle_resize(self, signum, frame):
- h,w=array('h', ioctl(self.fd,termios.TIOCGWINSZ,'\0'*8))[:2]
- self.term_width = w
-
- def percentage(self):
- "Returns the percentage of the progress."
- return self.currval*100.0 / self.maxval
-
- def _format_widgets(self):
- r = []
- hfill_inds = []
- num_hfill = 0
- currwidth = 0
- for i, w in enumerate(self.widgets):
- if isinstance(w, ProgressBarWidgetHFill):
- r.append(w)
- hfill_inds.append(i)
- num_hfill += 1
- elif isinstance(w, string_types):
- r.append(w)
- currwidth += len(w)
- else:
- weval = w.update(self)
- currwidth += len(weval)
- r.append(weval)
- for iw in hfill_inds:
- r[iw] = r[iw].update(self, (self.term_width-currwidth)/num_hfill)
- return r
-
- def _format_line(self):
- return ''.join(self._format_widgets()).ljust(self.term_width)
-
- def _need_update(self):
- return int(self.percentage()) != int(self.prev_percentage)
-
- def update(self, value):
- "Updates the progress bar to a new value."
- assert 0 <= value <= self.maxval
- self.currval = value
- if not self._need_update() or self.finished:
- return
- if not self.start_time:
- self.start_time = time.time()
- self.seconds_elapsed = time.time() - self.start_time
- self.prev_percentage = self.percentage()
- if value != self.maxval:
- self.fd.write(self._format_line() + '\r')
- else:
- self.finished = True
- self.fd.write(self._format_line() + '\n')
-
- def start(self):
- """Start measuring time, and prints the bar at 0%.
-
- It returns self so you can use it like this:
- >>> pbar = ProgressBar().start()
- >>> for i in xrange(100):
- ... # do something
- ... pbar.update(i+1)
- ...
- >>> pbar.finish()
- """
- self.update(0)
- return self
-
- def finish(self):
- """Used to tell the progress is finished."""
- self.update(self.maxval)
- if self.signal_set:
- signal.signal(signal.SIGWINCH, signal.SIG_DFL)
-
-
-
-
-
-
-if __name__=='__main__':
- import os
-
- def example1():
- widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
- ' ', ETA(), ' ', FileTransferSpeed()]
- pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
- for i in range(1000000):
- # do something
- pbar.update(10*i+1)
- pbar.finish()
- print()
-
- def example2():
- class CrazyFileTransferSpeed(FileTransferSpeed):
- "It's bigger between 45 and 80 percent"
- def update(self, pbar):
- if 45 < pbar.percentage() < 80:
- return 'Bigger Now ' + FileTransferSpeed.update(self,pbar)
- else:
- return FileTransferSpeed.update(self,pbar)
-
- widgets = [CrazyFileTransferSpeed(),' <<<', Bar(), '>>> ', Percentage(),' ', ETA()]
- pbar = ProgressBar(widgets=widgets, maxval=10000000)
- # maybe do something
- pbar.start()
- for i in range(2000000):
- # do something
- pbar.update(5*i+1)
- pbar.finish()
- print()
-
- def example3():
- widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
- pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
- for i in range(1000000):
- # do something
- pbar.update(10*i+1)
- pbar.finish()
- print()
-
- def example4():
- widgets = ['Test: ', Percentage(), ' ',
- Bar(marker='0',left='[',right=']'),
- ' ', ETA(), ' ', FileTransferSpeed()]
- pbar = ProgressBar(widgets=widgets, maxval=500)
- pbar.start()
- for i in range(100,500+1,50):
- time.sleep(0.2)
- pbar.update(i)
- pbar.finish()
- print()
-
-
- example1()
- example2()
- example3()
- example4()
-
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/extern/setup.py
--- a/yt/extern/setup.py
+++ b/yt/extern/setup.py
@@ -11,5 +11,6 @@
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('extern', parent_package, top_path)
+ config.add_subpackage("tqdm")
config.make_config_py()
return config
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/extern/tqdm/LICENSE
--- /dev/null
+++ b/yt/extern/tqdm/LICENSE
@@ -0,0 +1,22 @@
+https://github.com/tqdm/tqdm
+
+The MIT License (MIT)
+
+Copyright (c) 2013 noamraph
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/extern/tqdm/__init__.py
--- /dev/null
+++ b/yt/extern/tqdm/__init__.py
@@ -0,0 +1,11 @@
+from ._tqdm import tqdm
+from ._tqdm import trange
+from ._tqdm import format_interval
+from ._tqdm import format_meter
+from ._tqdm_gui import tqdm_gui
+from ._tqdm_gui import tgrange
+from ._tqdm_pandas import tqdm_pandas
+from ._version import __version__ # NOQA
+
+__all__ = ['tqdm', 'tqdm_gui', 'trange', 'tgrange', 'format_interval',
+ 'format_meter', 'tqdm_pandas', '__version__']
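
With tqdm vendored under yt.extern, the import path above gives the standard
usage pattern (a minimal sketch, matching the module's own docstring):

    from yt.extern.tqdm import tqdm, trange

    # Wrap any iterable to get a live progress meter on stderr.
    for i in tqdm(range(1000), desc="processing"):
        pass

    # trange(n) is shorthand for tqdm(range(n)).
    for i in trange(1000):
        pass
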
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/extern/tqdm/_tqdm.py
--- /dev/null
+++ b/yt/extern/tqdm/_tqdm.py
@@ -0,0 +1,562 @@
+"""
+Customisable progressbar decorator for iterators.
+Includes a default (x)range iterator printing to stderr.
+
+Usage:
+ >>> from tqdm import trange[, tqdm]
+ >>> for i in trange(10): #same as: for i in tqdm(xrange(10))
+ ... ...
+"""
+# future division is important to divide integers and get as
+# a result precise floating numbers (instead of truncated int)
+from __future__ import division, absolute_import
+# import compatibility functions and utilities
+from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
+ _term_move_up
+import sys
+from time import time
+
+
+__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
+ "casperdcl", "lrq3000"]}
+__all__ = ['tqdm', 'trange', 'format_interval', 'format_meter']
+
+
+def format_sizeof(num, suffix=''):
+ """
+ Formats a number (greater than unity) with SI Order of Magnitude prefixes.
+
+ Parameters
+ ----------
+ num : float
+ Number ( >= 1) to format.
+ suffix : str, optional
+ Post-postfix [default: ''].
+
+ Returns
+ -------
+ out : str
+ Number with Order of Magnitude SI unit postfix.
+ """
+ for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
+ if abs(num) < 999.95:
+ if abs(num) < 99.95:
+ if abs(num) < 9.995:
+ return '{0:1.2f}'.format(num) + unit + suffix
+ return '{0:2.1f}'.format(num) + unit + suffix
+ return '{0:3.0f}'.format(num) + unit + suffix
+ num /= 1000.0
+ return '{0:3.1f}Y'.format(num) + suffix
+
+
+def format_interval(t):
+ """
+ Formats a number of seconds as a clock time, [H:]MM:SS
+
+ Parameters
+ ----------
+ t : int
+ Number of seconds.
+ Returns
+ -------
+ out : str
+ [H:]MM:SS
+ """
+ mins, s = divmod(int(t), 60)
+ h, m = divmod(mins, 60)
+ if h:
+ return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
+ else:
+ return '{0:02d}:{1:02d}'.format(m, s)
+
+
+def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
+ unit='it', unit_scale=False, rate=None):
+ """
+ Return a string-based progress bar given some parameters
+
+ Parameters
+ ----------
+ n : int
+ Number of finished iterations.
+ total : int
+ The expected total number of iterations. If meaningless (), only
+ basic progress statistics are displayed (no ETA).
+ elapsed : float
+ Number of seconds passed since start.
+ ncols : int, optional
+ The width of the entire output message. If specified, dynamically
+ resizes the progress meter to stay within this bound
+ [default: None]. The fallback meter width is 10 for the progress bar
+ + no limit for the iterations counter and statistics. If 0, will not
+ print any meter (only stats).
+ prefix : str, optional
+ Prefix message (included in total width) [default: ''].
+ ascii : bool, optional
+ If not set, use unicode (smooth blocks) to fill the meter
+ [default: False]. The fallback is to use ASCII characters (1-9 #).
+ unit : str, optional
+ The iteration unit [default: 'it'].
+ unit_scale : bool, optional
+ If set, the number of iterations will be printed with an appropriate
+ SI metric prefix (K = 10^3, M = 10^6, etc.) [default: False].
+ rate : float, optional
+ Manual override for iteration rate.
+ If [default: None], uses n/elapsed.
+
+ Returns
+ -------
+ out : Formatted meter and stats, ready to display.
+ """
+
+ # sanity check: total
+ if total and n > total:
+ total = None
+
+ elapsed_str = format_interval(elapsed)
+
+ # if unspecified, attempt to use rate = average speed
+ # (we allow manual override since predicting time is an arcane art)
+ if rate is None and elapsed:
+ rate = n / elapsed
+ rate_fmt = ((format_sizeof(rate) if unit_scale else
+ '{0:5.2f}'.format(rate)) if elapsed else
+ '?') \
+ + unit + '/s'
+
+ if unit_scale:
+ n_fmt = format_sizeof(n)
+ total_fmt = format_sizeof(total) if total else None
+ else:
+ n_fmt = str(n)
+ total_fmt = str(total)
+
+ # total is known: we can predict some stats
+ if total:
+ # fractional and percentage progress
+ frac = n / total
+ percentage = frac * 100
+
+ remaining_str = format_interval((total - n) / rate) if rate else '?'
+
+ # format the stats displayed to the left and right sides of the bar
+ l_bar = (prefix if prefix else '') + '{0:3.0f}%|'.format(percentage)
+ r_bar = '| {0}/{1} [{2}<{3}, {4}]'.format(
+ n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt)
+
+ if ncols == 0:
+ return l_bar[:-1] + r_bar[1:]
+
+ # space available for bar's display
+ N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
+ else 10
+
+ # format bar depending on availability of unicode/ascii chars
+ if ascii:
+ bar_length, frac_bar_length = divmod(
+ int(frac * N_BARS * 10), 10)
+
+ bar = '#' * bar_length
+ frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
+ else ' '
+
+ else:
+ bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
+
+ bar = _unich(0x2588) * bar_length
+ frac_bar = _unich(0x2590 - frac_bar_length) \
+ if frac_bar_length else ' '
+
+ # whitespace padding
+ if bar_length < N_BARS:
+ full_bar = bar + frac_bar + \
+ ' ' * max(N_BARS - bar_length - 1, 0)
+ else:
+ full_bar = bar + \
+ ' ' * max(N_BARS - bar_length, 0)
+
+ return l_bar + full_bar + r_bar
+
+ # no total: no progressbar, ETA, just progress stats
+ else:
+ return (prefix if prefix else '') + '{0}{1} [{2}, {3}]'.format(
+ n_fmt, unit, elapsed_str, rate_fmt)
+
+
+def StatusPrinter(file):
+ """
+ Manage the printing and in-place updating of a line of characters.
+ Note that if the string is longer than a line, then in-place updating
+ may not work (it will print a new line at each refresh).
+ """
+ fp = file
+ if not getattr(fp, 'flush', False): # pragma: no cover
+ fp.flush = lambda: None
+
+ last_printed_len = [0] # closure over mutable variable (fast)
+
+ def print_status(s):
+ len_s = len(s)
+ fp.write('\r' + s + (' ' * max(last_printed_len[0] - len_s, 0)))
+ fp.flush()
+ last_printed_len[0] = len_s
+ return print_status
+
+
+class tqdm(object):
+ """
+ Decorate an iterable object, returning an iterator which acts exactly
+ like the original iterable, but prints a dynamically updating
+ progressbar every time a value is requested.
+ """
+ def __init__(self, iterable=None, desc=None, total=None, leave=False,
+ file=sys.stderr, ncols=None, mininterval=0.1,
+ maxinterval=10.0, miniters=None, ascii=None, disable=False,
+ unit='it', unit_scale=False, dynamic_ncols=False,
+ smoothing=0.3, nested=False, gui=False):
+ """
+ Parameters
+ ----------
+ iterable : iterable, optional
+ Iterable to decorate with a progressbar.
+ Leave blank [default: None] to manually manage the updates.
+ desc : str, optional
+ Prefix for the progressbar [default: None].
+ total : int, optional
+ The number of expected iterations. If not given, len(iterable)
+ is used if possible. As a last resort, only basic progress
+ statistics are displayed (no ETA, no progressbar). If `gui` is
+ True and this parameter needs subsequent updating, specify an
+ initial arbitrary large positive integer, e.g. int(9e9).
+ leave : bool, optional
+ If [default: False], removes all traces of the progressbar
+ upon termination of iteration.
+ file : `io.TextIOWrapper` or `io.StringIO`, optional
+ Specifies where to output the progress messages
+ [default: sys.stderr]. Uses `file.write(str)` and `file.flush()`
+ methods.
+ ncols : int, optional
+ The width of the entire output message. If specified,
+ dynamically resizes the progressbar to stay within this bound.
+ If [default: None], attempts to use environment width. The
+ fallback is a meter width of 10 and no limit for the counter and
+ statistics. If 0, will not print any meter (only stats).
+ mininterval : float, optional
+ Minimum progress update interval, in seconds [default: 0.1].
+ maxinterval : float, optional
+ Maximum progress update interval, in seconds [default: 10.0].
+ miniters : int, optional
+ Minimum progress update interval, in iterations [default: None].
+ ascii : bool, optional
+ If [default: None] or false, use unicode (smooth blocks) to fill
+ the meter. The fallback is to use ASCII characters `1-9 #`.
+ disable : bool
+ Whether to disable the entire progressbar wrapper
+ [default: False].
+ unit : str, optional
+ String that will be used to define the unit of each iteration
+ [default: 'it'].
+ unit_scale : bool, optional
+ If set, the number of iterations will be reduced/scaled
+ automatically and a metric prefix following the
+ International System of Units standard will be added
+ (kilo, mega, etc.) [default: False].
+ dynamic_ncols : bool, optional
+ If set, constantly alters `ncols` to the environment (allowing
+ for window resizes) [default: False].
+ smoothing : float
+ Exponential moving average smoothing factor for speed estimates
+ (ignored in GUI mode). Ranges from 0 (average speed) to 1
+ (current/instantaneous speed) [default: 0.3].
+ nested : bool, optional
+ Whether this iterable is nested in another one also managed by
+ `tqdm` [default: False]. Allows display of multiple, nested
+ progress bars.
+ gui : bool, optional
+ WARNING: internal parameter - do not use.
+ Use tqdm_gui(...) instead. If set, will attempt to use
+ matplotlib animations for a graphical output [default: false].
+
+ Returns
+ -------
+ out : decorated iterator.
+ """
+ # Preprocess the arguments
+ if total is None and iterable is not None:
+ try:
+ total = len(iterable)
+ except (TypeError, AttributeError):
+ total = None
+
+ if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
+ dynamic_ncols:
+ if dynamic_ncols: # pragma: no cover
+ dynamic_ncols = _environ_cols_wrapper()
+ ncols = dynamic_ncols(file)
+ else:
+ ncols = _environ_cols_wrapper()(file)
+
+ if miniters is None:
+ miniters = 0
+ dynamic_miniters = True
+ else:
+ dynamic_miniters = False
+
+ if mininterval is None:
+ mininterval = 0
+
+ if maxinterval is None:
+ maxinterval = 0
+
+ if ascii is None:
+ ascii = not _supports_unicode(file)
+
+ if smoothing is None:
+ smoothing = 0
+
+ # Store the arguments
+ self.iterable = iterable
+ self.desc = desc + ': ' if desc else ''
+ self.total = total
+ self.leave = leave
+ self.fp = file
+ self.ncols = ncols
+ self.mininterval = mininterval
+ self.maxinterval = maxinterval
+ self.miniters = miniters
+ self.dynamic_miniters = dynamic_miniters
+ self.ascii = ascii
+ self.disable = disable
+ self.unit = unit
+ self.unit_scale = unit_scale
+ self.gui = gui
+ self.dynamic_ncols = dynamic_ncols
+ self.smoothing = smoothing
+ self.avg_rate = None
+ # if nested, at initial sp() call we replace '\r' by '\n' to
+ # not overwrite the outer progress bar
+ self.nested = nested
+
+ if not gui:
+ # Initialize the screen printer
+ self.sp = StatusPrinter(self.fp)
+ if not disable:
+ if self.nested:
+ self.fp.write('\n')
+ self.sp(format_meter(0, total, 0,
+ (dynamic_ncols(file) if dynamic_ncols else ncols),
+ self.desc, ascii, unit, unit_scale))
+
+ # Init the time/iterations counters
+ self.start_t = self.last_print_t = time()
+ self.last_print_n = 0
+ self.n = 0
+
+ def __len__(self):
+ return len(self.iterable) if self.iterable else self.total
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *exc):
+ self.close()
+ return False
+
+ def __iter__(self):
+ ''' Backward-compatibility to use: for x in tqdm(iterable) '''
+
+ # Inlining instance variables as locals (speed optimisation)
+ iterable = self.iterable
+
+ # If the bar is disabled, then just walk the iterable
+ # (note: keep this check outside the loop for performance)
+ if self.disable:
+ for obj in iterable:
+ yield obj
+ else:
+ ncols = self.ncols
+ mininterval = self.mininterval
+ maxinterval = self.maxinterval
+ miniters = self.miniters
+ dynamic_miniters = self.dynamic_miniters
+ unit = self.unit
+ unit_scale = self.unit_scale
+ ascii = self.ascii
+ start_t = self.start_t
+ last_print_t = self.last_print_t
+ last_print_n = self.last_print_n
+ n = self.n
+ dynamic_ncols = self.dynamic_ncols
+ smoothing = self.smoothing
+ avg_rate = self.avg_rate
+
+ try:
+ sp = self.sp
+ except AttributeError:
+ raise DeprecationWarning('Please use tqdm_gui(...)'
+ ' instead of tqdm(..., gui=True)')
+
+ for obj in iterable:
+ yield obj
+ # Update and print the progressbar.
+ # Note: does not call self.update(1) for speed optimisation.
+ n += 1
+ delta_it = n - last_print_n
+ # check the counter first (avoid calls to time())
+ if delta_it >= miniters:
+ cur_t = time()
+ delta_t = cur_t - last_print_t
+ if delta_t >= mininterval:
+ elapsed = cur_t - start_t
+ # EMA (not just overall average)
+ if smoothing and delta_t:
+ avg_rate = delta_it / delta_t \
+ if avg_rate is None \
+ else smoothing * delta_it / delta_t + \
+ (1 - smoothing) * avg_rate
+
+ sp(format_meter(
+ n, self.total, elapsed,
+ (dynamic_ncols(self.fp) if dynamic_ncols
+ else ncols),
+ self.desc, ascii, unit, unit_scale, avg_rate))
+
+ # If no `miniters` was specified, adjust automatically
+ # to the maximum iteration rate seen so far.
+ if dynamic_miniters:
+ if maxinterval and delta_t > maxinterval:
+ # Set miniters to correspond to maxinterval
+ miniters = delta_it * maxinterval / delta_t
+ elif mininterval and delta_t:
+ # EMA-weight miniters to converge
+ # towards the timeframe of mininterval
+ miniters = smoothing * delta_it * mininterval \
+ / delta_t + (1 - smoothing) * miniters
+ else:
+ miniters = smoothing * delta_it + \
+ (1 - smoothing) * miniters
+
+ # Store old values for next call
+ last_print_n = n
+ last_print_t = cur_t
+
+ # Closing the progress bar.
+ # Update some internal variables for close().
+ self.last_print_n = last_print_n
+ self.n = n
+ self.close()
+
+ def update(self, n=1):
+ """
+ Manually update the progress bar, useful for streams
+ such as reading files.
+ E.g.:
+ >>> t = tqdm(total=filesize) # Initialise
+ >>> for current_buffer in stream:
+ ... ...
+ ... t.update(len(current_buffer))
+ >>> t.close()
+ The last line is highly recommended, but possibly not necessary if
+ `t.update()` will be called in such a way that `filesize` will be
+ exactly reached and printed.
+
+ Parameters
+ ----------
+ n : int
+ Increment to add to the internal counter of iterations
+ [default: 1].
+ """
+ if self.disable:
+ return
+
+ if n < 1:
+ n = 1
+ self.n += n
+
+ delta_it = self.n - self.last_print_n # should be n?
+ if delta_it >= self.miniters:
+ # We check the counter first, to reduce the overhead of time()
+ cur_t = time()
+ delta_t = cur_t - self.last_print_t
+ if delta_t >= self.mininterval:
+ elapsed = cur_t - self.start_t
+ # EMA (not just overall average)
+ if self.smoothing and delta_t:
+ self.avg_rate = delta_it / delta_t \
+ if self.avg_rate is None \
+ else self.smoothing * delta_it / delta_t + \
+ (1 - self.smoothing) * self.avg_rate
+
+ if not hasattr(self, "sp"):
+ raise DeprecationWarning('Please use tqdm_gui(...)'
+ ' instead of tqdm(..., gui=True)')
+
+ self.sp(format_meter(
+ self.n, self.total, elapsed,
+ (self.dynamic_ncols(self.fp) if self.dynamic_ncols
+ else self.ncols),
+ self.desc, self.ascii, self.unit, self.unit_scale,
+ self.avg_rate))
+
+ # If no `miniters` was specified, adjust automatically to the
+ # maximum iteration rate seen so far.
+ # e.g.: After running `tqdm.update(5)`, subsequent
+ # calls to `tqdm.update()` will only cause an update after
+ # at least 5 more iterations.
+ if self.dynamic_miniters:
+ if self.maxinterval and delta_t > self.maxinterval:
+ self.miniters = self.miniters * self.maxinterval \
+ / delta_t
+ elif self.mininterval and delta_t:
+ self.miniters = self.smoothing * delta_it \
+ * self.mininterval / delta_t + \
+ (1 - self.smoothing) * self.miniters
+ else:
+ self.miniters = self.smoothing * delta_it + \
+ (1 - self.smoothing) * self.miniters
+
+ # Store old values for next call
+ self.last_print_n = self.n
+ self.last_print_t = cur_t
+
+ def close(self):
+ """
+ Cleanup and (if leave=False) close the progressbar.
+ """
+ if self.disable:
+ return
+
+ endchar = '\r'
+ if self.nested:
+ endchar += _term_move_up()
+
+ if self.leave:
+ if self.last_print_n < self.n:
+ cur_t = time()
+ # stats for overall rate (no weighted average)
+ self.sp(format_meter(
+ self.n, self.total, cur_t - self.start_t,
+ (self.dynamic_ncols(self.fp) if self.dynamic_ncols
+ else self.ncols),
+ self.desc, self.ascii, self.unit, self.unit_scale))
+ if self.nested:
+ self.fp.write(endchar)
+ else:
+ self.fp.write('\n')
+ else:
+ self.sp('')
+ self.fp.write(endchar)
+
+ def set_description(self, desc=None):
+ """
+ Set/modify description of the progress bar.
+ """
+ self.desc = desc + ': ' if desc else ''
+
+
+def trange(*args, **kwargs):
+ """
+ A shortcut for tqdm(xrange(*args), **kwargs).
+ On Python3+ range is used instead of xrange.
+ """
+ return tqdm(_range(*args), **kwargs)
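A minimal usage sketch for the vendored module above, covering both patterns the class supports (the byte buffer and chunk size are illustrative; the import path follows the file added in this diff):

    from yt.extern.tqdm._tqdm import tqdm, trange

    # Pattern 1: wrap an iterable; __iter__ drives the bar.
    total = 0
    for i in trange(100):
        total += i

    # Pattern 2: drive the bar manually, e.g. when consuming a stream.
    data = b"x" * 10000
    t = tqdm(total=len(data))
    for start in range(0, len(data), 1024):
        chunk = data[start:start + 1024]
        t.update(len(chunk))  # increment by the amount consumed
    t.close()                 # prints the final state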
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/extern/tqdm/_tqdm_gui.py
--- /dev/null
+++ b/yt/extern/tqdm/_tqdm_gui.py
@@ -0,0 +1,308 @@
+"""
+GUI progressbar decorator for iterators.
+Includes a default (x)range iterator printing to stderr.
+
+Usage:
+ >>> from tqdm_gui import tgrange[, tqdm_gui]
+ >>> for i in tgrange(10): #same as: for i in tqdm_gui(xrange(10))
+ ... ...
+"""
+# future division is important to divide integers and get as
+# a result precise floating numbers (instead of truncated int)
+from __future__ import division, absolute_import
+# import compatibility functions and utilities
+from time import time
+from ._utils import _range
+# to inherit from the tqdm class
+from ._tqdm import tqdm, format_meter
+
+
+__author__ = {"github.com/": ["casperdcl", "lrq3000"]}
+__all__ = ['tqdm_gui', 'tgrange']
+
+
+class tqdm_gui(tqdm): # pragma: no cover
+ """
+ Experimental GUI version of tqdm!
+ """
+ def __init__(self, *args, **kwargs):
+
+ # try: # pragma: no cover
+ import matplotlib as mpl
+ import matplotlib.pyplot as plt
+ from collections import deque
+ # except ImportError: # gui not available
+ # kwargs['gui'] = False
+ # else:
+ kwargs['gui'] = True
+
+ super(tqdm_gui, self).__init__(*args, **kwargs)
+
+ # Initialize the GUI display
+ if self.disable or not kwargs['gui']:
+ return
+
+ self.fp.write('Warning: GUI is experimental/alpha\n')
+ self.mpl = mpl
+ self.plt = plt
+ self.sp = None
+
+ # Remember if external environment uses toolbars
+ self.toolbar = self.mpl.rcParams['toolbar']
+ self.mpl.rcParams['toolbar'] = 'None'
+
+ self.mininterval = max(self.mininterval, 0.5)
+ self.fig, ax = plt.subplots(figsize=(9, 2.2))
+ # self.fig.subplots_adjust(bottom=0.2)
+ if self.total:
+ self.xdata = []
+ self.ydata = []
+ self.zdata = []
+ else:
+ self.xdata = deque([])
+ self.ydata = deque([])
+ self.zdata = deque([])
+ self.line1, = ax.plot(self.xdata, self.ydata, color='b')
+ self.line2, = ax.plot(self.xdata, self.zdata, color='k')
+ ax.set_ylim(0, 0.001)
+ if self.total:
+ ax.set_xlim(0, 100)
+ ax.set_xlabel('percent')
+ self.fig.legend((self.line1, self.line2), ('cur', 'est'),
+ loc='center right')
+ # progressbar
+ self.hspan = plt.axhspan(0, 0.001,
+ xmin=0, xmax=0, color='g')
+ else:
+ # ax.set_xlim(-60, 0)
+ ax.set_xlim(0, 60)
+ ax.invert_xaxis()
+ ax.set_xlabel('seconds')
+ ax.legend(('cur', 'est'), loc='lower left')
+ ax.grid()
+ # ax.set_xlabel('seconds')
+ ax.set_ylabel((self.unit if self.unit else 'it') + '/s')
+ if self.unit_scale:
+ plt.ticklabel_format(style='sci', axis='y',
+ scilimits=(0, 0))
+ ax.yaxis.get_offset_text().set_x(-0.15)
+
+ # Remember if external environment is interactive
+ self.wasion = plt.isinteractive()
+ plt.ion()
+ self.ax = ax
+
+ def __iter__(self):
+ # TODO: somehow allow the following:
+ # if not self.gui:
+ # return super(tqdm_gui, self).__iter__()
+ iterable = self.iterable
+ if self.disable:
+ for obj in iterable:
+ yield obj
+ return
+
+ # ncols = self.ncols
+ mininterval = self.mininterval
+ miniters = self.miniters
+ dynamic_miniters = self.dynamic_miniters
+ unit = self.unit
+ unit_scale = self.unit_scale
+ ascii = self.ascii
+ start_t = self.start_t
+ last_print_t = self.last_print_t
+ last_print_n = self.last_print_n
+ n = self.n
+ # dynamic_ncols = self.dynamic_ncols
+ # smoothing = self.smoothing
+ # avg_rate = self.avg_rate
+
+ plt = self.plt
+ ax = self.ax
+ xdata = self.xdata
+ ydata = self.ydata
+ zdata = self.zdata
+ line1 = self.line1
+ line2 = self.line2
+
+ for obj in iterable:
+ yield obj
+ # Update and print the progressbar.
+ # Note: does not call self.update(1) for speed optimisation.
+ n += 1
+ delta_it = n - last_print_n
+ # check the counter first (avoid calls to time())
+ if delta_it >= miniters:
+ cur_t = time()
+ delta_t = cur_t - last_print_t
+ if delta_t >= mininterval: # pragma: no cover
+ elapsed = cur_t - start_t
+ # Inline due to multiple calls
+ total = self.total
+ # instantaneous rate
+ y = delta_it / delta_t
+ # overall rate
+ z = n / elapsed
+ # update line data
+ xdata.append(n * 100.0 / total if total else cur_t)
+ ydata.append(y)
+ zdata.append(z)
+
+ # Discard old values
+ # xmin, xmax = ax.get_xlim()
+ # if (not total) and elapsed > xmin * 1.1:
+ if (not total) and elapsed > 66:
+ xdata.popleft()
+ ydata.popleft()
+ zdata.popleft()
+
+ ymin, ymax = ax.get_ylim()
+ if y > ymax or z > ymax:
+ ymax = 1.1 * y
+ ax.set_ylim(ymin, ymax)
+ ax.figure.canvas.draw()
+
+ if total:
+ line1.set_data(xdata, ydata)
+ line2.set_data(xdata, zdata)
+ try:
+ poly_lims = self.hspan.get_xy()
+ except AttributeError:
+ self.hspan = plt.axhspan(0, 0.001, xmin=0,
+ xmax=0, color='g')
+ poly_lims = self.hspan.get_xy()
+ poly_lims[0, 1] = ymin
+ poly_lims[1, 1] = ymax
+ poly_lims[2] = [n / total, ymax]
+ poly_lims[3] = [poly_lims[2, 0], ymin]
+ if len(poly_lims) > 4:
+ poly_lims[4, 1] = ymin
+ self.hspan.set_xy(poly_lims)
+ else:
+ t_ago = [cur_t - i for i in xdata]
+ line1.set_data(t_ago, ydata)
+ line2.set_data(t_ago, zdata)
+
+ ax.set_title(format_meter(
+ n, total, elapsed, 0,
+ self.desc, ascii, unit, unit_scale),
+ fontname="DejaVu Sans Mono",
+ fontsize=11)
+ plt.pause(1e-9)
+
+ # If no `miniters` was specified, adjust automatically
+ # to the maximum iteration rate seen so far.
+ if dynamic_miniters:
+ miniters = max(miniters, delta_it)
+
+ # Store old values for next call
+ last_print_n = n
+ last_print_t = cur_t
+
+ # Closing the progress bar.
+ # Update some internal variables for close().
+ self.last_print_n = last_print_n
+ self.n = n
+ self.close()
+
+ def update(self, n=1):
+ # if not self.gui:
+ # return super(tqdm_gui, self).close()
+ if self.disable:
+ return
+
+ if n < 1:
+ n = 1
+ self.n += n
+
+ delta_it = self.n - self.last_print_n # should be n?
+ if delta_it >= self.miniters:
+ # We check the counter first, to reduce the overhead of time()
+ cur_t = time()
+ delta_t = cur_t - self.last_print_t
+ if delta_t >= self.mininterval:
+ elapsed = cur_t - self.start_t
+ # Inline due to multiple calls
+ total = self.total
+ ax = self.ax
+
+ # instantaneous rate
+ y = delta_it / delta_t
+ # smoothed rate
+                # overall rate
+ # update line data
+ self.xdata.append(self.n * 100.0 / total
+ if total else cur_t)
+ self.ydata.append(y)
+ self.zdata.append(z)
+
+ # Discard old values
+ if (not total) and elapsed > 66:
+ self.xdata.popleft()
+ self.ydata.popleft()
+ self.zdata.popleft()
+
+ ymin, ymax = ax.get_ylim()
+ if y > ymax or z > ymax:
+ ymax = 1.1 * y
+ ax.set_ylim(ymin, ymax)
+ ax.figure.canvas.draw()
+
+ if total:
+ self.line1.set_data(self.xdata, self.ydata)
+ self.line2.set_data(self.xdata, self.zdata)
+ try:
+ poly_lims = self.hspan.get_xy()
+ except AttributeError:
+ self.hspan = self.plt.axhspan(0, 0.001, xmin=0,
+ xmax=0, color='g')
+ poly_lims = self.hspan.get_xy()
+ poly_lims[0, 1] = ymin
+ poly_lims[1, 1] = ymax
+ poly_lims[2] = [self.n / total, ymax]
+ poly_lims[3] = [poly_lims[2, 0], ymin]
+ if len(poly_lims) > 4:
+ poly_lims[4, 1] = ymin
+ self.hspan.set_xy(poly_lims)
+ else:
+ t_ago = [cur_t - i for i in self.xdata]
+ self.line1.set_data(t_ago, self.ydata)
+ self.line2.set_data(t_ago, self.zdata)
+
+ ax.set_title(format_meter(
+ self.n, total, elapsed, 0,
+ self.desc, self.ascii, self.unit, self.unit_scale),
+ fontname="DejaVu Sans Mono",
+ fontsize=11)
+ self.plt.pause(1e-9)
+
+ # If no `miniters` was specified, adjust automatically to the
+ # maximum iteration rate seen so far.
+ if self.dynamic_miniters:
+ self.miniters = max(self.miniters, delta_it)
+
+ # Store old values for next call
+ self.last_print_n = self.n
+ self.last_print_t = cur_t
+
+ def close(self):
+ # if not self.gui:
+ # return super(tqdm_gui, self).close()
+ if self.disable:
+ return
+
+ # Restore toolbars
+ self.mpl.rcParams['toolbar'] = self.toolbar
+ # Return to non-interactive mode
+ if not self.wasion:
+ self.plt.ioff()
+ if not self.leave:
+ self.plt.close(self.fig)
+
+
+def tgrange(*args, **kwargs):
+ """
+ A shortcut for tqdm_gui(xrange(*args), **kwargs).
+ On Python3+ range is used instead of xrange.
+ """
+ return tqdm_gui(_range(*args), **kwargs)
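A minimal sketch of the GUI variant above; it requires matplotlib and is flagged as experimental in its own docstring:

    from yt.extern.tqdm._tqdm_gui import tqdm_gui, tgrange

    # The bar is rendered as a live plot of the instantaneous ("cur")
    # and overall ("est") iteration rates.
    for i in tgrange(1000):
        pass

    # Equivalent explicit form:
    for i in tqdm_gui(range(1000)):
        pass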
diff -r e24ec9b6c0053aee5a461231fc6587bfd17b4e10 -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 yt/extern/tqdm/_tqdm_pandas.py
--- /dev/null
+++ b/yt/extern/tqdm/_tqdm_pandas.py
@@ -0,0 +1,58 @@
+# future division is important to divide integers and get as
+# a result precise floating numbers (instead of truncated int)
+from __future__ import absolute_import
+
+
+__author__ = "github.com/casperdcl"
+__all__ = ['tqdm_pandas']
+
+
+def tqdm_pandas(t): # pragma: no cover
+ """
+ Registers the given `tqdm` instance with
+ `pandas.core.groupby.DataFrameGroupBy.progress_apply`.
+ It will even close() the `tqdm` instance upon completion.
+
+ Examples
+ --------
+ >>> import pandas as pd
+ >>> import numpy as np
+ >>> from tqdm import tqdm, tqdm_pandas
+ >>>
+ >>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
+ >>> tqdm_pandas(tqdm()) # can use tqdm_gui, optional kwargs, etc
+ >>> # Now you can use `progress_apply` instead of `apply`
+ >>> df.groupby(0).progress_apply(lambda x: x**2)
+
+ References
+ ----------
+ https://stackoverflow.com/questions/18603270/
+ progress-indicator-during-pandas-operations-python
+ """
+ from pandas.core.groupby import DataFrameGroupBy
+
+ def inner(groups, func, *args, **kwargs):
+ """
+ Parameters
+ ----------
+ groups : DataFrameGroupBy
+ Grouped data.
+ func : function
+ To be applied on the grouped data.
+
+ *args and *kwargs are transmitted to DataFrameGroupBy.apply()
+ """
+ t.total = len(groups) + 1 # pandas calls update once too many
+
+ def wrapper(*args, **kwargs):
+ t.update()
+ return func(*args, **kwargs)
+
+ result = groups.apply(wrapper, *args, **kwargs)
+
+ t.close()
+
+ return result
+
+ # Enable custom tqdm progress in pandas!
+ DataFrameGroupBy.progress_apply = inner
This diff is so big that we needed to truncate the remainder.
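A runnable version of the tqdm_pandas docstring example above, using the vendored import paths (assumes pandas and numpy are installed):

    import numpy as np
    import pandas as pd
    from yt.extern.tqdm._tqdm import tqdm
    from yt.extern.tqdm._tqdm_pandas import tqdm_pandas

    df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
    tqdm_pandas(tqdm())  # patches DataFrameGroupBy.progress_apply
    df.groupby(0).progress_apply(lambda x: x ** 2)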
https://bitbucket.org/yt_analysis/yt/commits/ab85affd291b/
Changeset: ab85affd291b
Branch: yt
User: brittonsmith
Date: 2016-01-26 18:38:57+00:00
Summary: Making sure args from pbar.update get passed along and adding a parallel option for get_pbar.
Affected #: 2 files
diff -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 -r ab85affd291b4969fff89a1effc348aae6ab4d41 yt/extern/tqdm/_tqdm.py
--- a/yt/extern/tqdm/_tqdm.py
+++ b/yt/extern/tqdm/_tqdm.py
@@ -446,15 +446,21 @@
self.n = n
self.close()
- def update(self, n=1):
+ def update(self, n=None, i=None):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
+ ... t.update(i=len(current_buffer))
+ >>> t.close()
+ >>>
+ >>> some_list = range(10)
+ >>> t = tqdm(total=len(some_list)) # Initialise
+        >>> for n, val in enumerate(some_list):
... ...
- ... t.update(len(current_buffer))
+ ... t.update(n)
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
@@ -463,15 +469,23 @@
Parameters
----------
n : int
- Increment to add to the internal counter of iterations
- [default: 1].
+ The current iteration.
+ i : int
+ The increment
"""
+
+ if n is None and i is None:
+ i = 1
+ if n is not None and i is not None:
+ raise RuntimeError("Must specify either n or i, not both.")
+
if self.disable:
return
- if n < 1:
- n = 1
- self.n += n
+ if n is not None:
+ self.n = n
+ else:
+ self.n += i
delta_it = self.n - self.last_print_n # should be n?
if delta_it >= self.miniters:
diff -r 14436cdc7b46594a7f58ea7f800db78ebdf63cb6 -r ab85affd291b4969fff89a1effc348aae6ab4d41 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -340,10 +340,9 @@
# This is a drop in replacement for pbar
# called tqdm
def __init__(self,title, maxval):
- self._pbar = tqdm(leave=True,total=maxval,desc=title)
-
- def update(self,*args,**kwargs):
- self._pbar.update()
+ self._pbar = tqdm(leave=True, total=maxval, desc=title)
+ def update(self,*args, **kwargs):
+ self._pbar.update(*args, **kwargs)
def finish(self):
self._pbar.close()
@@ -385,7 +384,7 @@
def finish(self):
self._pbar.Destroy()
-def get_pbar(title, maxval):
+def get_pbar(title, maxval, parallel=False):
"""
This returns a progressbar of the most appropriate type, given a *title*
and a *maxval*.
@@ -396,7 +395,14 @@
ytcfg.getboolean("yt", "__withintesting"):
return DummyProgressBar()
elif ytcfg.getboolean("yt", "__parallel"):
- return ParallelProgressBar(title, maxval)
+ # If parallel is True, update progress on root only.
+ if parallel:
+ if is_root():
+ return TqdmProgressBar(title, maxval)
+ else:
+ return DummyProgressBar()
+ else:
+ return ParallelProgressBar(title, maxval)
pbar = TqdmProgressBar(title,maxval)
return pbar
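A sketch of the intended call pattern for the new keyword (the loop body and title are illustrative). When yt runs in parallel mode, only the root processor receives a real bar, so every processor can call update() and finish() unconditionally:

    from yt.funcs import get_pbar

    n_halos = 1000
    pbar = get_pbar("Analyzing halos", n_halos, parallel=True)
    for i in range(n_halos):
        # ... per-halo work ...
        pbar.update(i + 1)  # yt convention: absolute iteration count
    pbar.finish()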
https://bitbucket.org/yt_analysis/yt/commits/ebb9592de849/
Changeset: ebb9592de849
Branch: yt
User: brittonsmith
Date: 2016-01-27 11:52:18+00:00
Summary: Reverting changes to tqdm source.
Affected #: 1 file
diff -r ab85affd291b4969fff89a1effc348aae6ab4d41 -r ebb9592de8498735b51dad05b058e9e0b3f56e8b yt/extern/tqdm/_tqdm.py
--- a/yt/extern/tqdm/_tqdm.py
+++ b/yt/extern/tqdm/_tqdm.py
@@ -446,21 +446,15 @@
self.n = n
self.close()
- def update(self, n=None, i=None):
+ def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
- ... t.update(i=len(current_buffer))
- >>> t.close()
- >>>
- >>> some_list = range(10)
- >>> t = tqdm(total=len(some_list)) # Initialise
-        >>> for n, val in enumerate(some_list):
... ...
- ... t.update(n)
+ ... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
@@ -469,23 +463,15 @@
Parameters
----------
n : int
- The current iteration.
- i : int
- The increment
+ Increment to add to the internal counter of iterations
+ [default: 1].
"""
-
- if n is None and i is None:
- i = 1
- if n is not None and i is not None:
- raise RuntimeError("Must specify either n or i, not both.")
-
if self.disable:
return
- if n is not None:
- self.n = n
- else:
- self.n += i
+ if n < 1:
+ n = 1
+ self.n += n
delta_it = self.n - self.last_print_n # should be n?
if delta_it >= self.miniters:
https://bitbucket.org/yt_analysis/yt/commits/7c6757ed3b13/
Changeset: 7c6757ed3b13
Branch: yt
User: brittonsmith
Date: 2016-01-27 12:01:14+00:00
Summary: Making wrapper progress bar function as before.
Affected #: 1 file
diff -r ebb9592de8498735b51dad05b058e9e0b3f56e8b -r 7c6757ed3b13413b15bb2a951e97da44524c2cbb yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -341,8 +341,11 @@
# called tqdm
def __init__(self,title, maxval):
self._pbar = tqdm(leave=True, total=maxval, desc=title)
- def update(self,*args, **kwargs):
- self._pbar.update(*args, **kwargs)
+ self.i = 0
+ def update(self, i):
+ n = i - self.i
+ self.i = i
+ self._pbar.update(n)
def finish(self):
self._pbar.close()
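The wrapper now translates yt's absolute-count convention into tqdm's increment convention; a small sketch of the resulting behavior (values are illustrative):

    from yt.funcs import TqdmProgressBar

    # update(i) takes the absolute iteration count, like the other yt
    # progress bars; the wrapper remembers self.i and forwards only the
    # delta (i - self.i) to the underlying tqdm instance.
    pbar = TqdmProgressBar("Processing", 5)
    for i in [1, 3, 5]:  # jumps are fine; forwarded deltas are 1, 2, 2
        pbar.update(i)
    pbar.finish()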
https://bitbucket.org/yt_analysis/yt/commits/9fbf8e52001a/
Changeset: 9fbf8e52001a
Branch: yt
User: brittonsmith
Date: 2016-01-27 12:14:24+00:00
Summary: Merging.
Affected #: 12 files
diff -r 7c6757ed3b13413b15bb2a951e97da44524c2cbb -r 9fbf8e52001af6f02cdd9c0dfc6c2e413a4321ee yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -170,8 +170,7 @@
from yt.convenience import \
load, simulation
-from yt.testing import \
- run_nose
+from yt.testing import run_nose
# Import some helpful math utilities
from yt.utilities.math_utils import \
diff -r 7c6757ed3b13413b15bb2a951e97da44524c2cbb -r 9fbf8e52001af6f02cdd9c0dfc6c2e413a4321ee yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -437,7 +437,7 @@
pu = ParticleUnion("all", list(self.particle_types_raw))
self.add_particle_union(pu)
self.field_info.setup_extra_union_fields()
- mylog.info("Loading field plugins.")
+ mylog.debug("Loading field plugins.")
self.field_info.load_all_plugins()
deps, unloaded = self.field_info.check_derived_fields()
self.field_dependencies.update(deps)
diff -r 7c6757ed3b13413b15bb2a951e97da44524c2cbb -r 9fbf8e52001af6f02cdd9c0dfc6c2e413a4321ee yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -251,7 +251,7 @@
loaded = []
for n in sorted(field_plugins):
loaded += self.load_plugin(n, ftype)
- only_on_root(mylog.info, "Loaded %s (%s new fields)",
+ only_on_root(mylog.debug, "Loaded %s (%s new fields)",
n, len(loaded))
self.find_dependencies(loaded)
diff -r 7c6757ed3b13413b15bb2a951e97da44524c2cbb -r 9fbf8e52001af6f02cdd9c0dfc6c2e413a4321ee yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -16,6 +16,7 @@
#-----------------------------------------------------------------------------
from yt.extern.six import string_types
+from yt.funcs import only_on_root
from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import stat
@@ -169,7 +170,7 @@
# It may be possible to deduce whether ComovingIntegration is on
# somehow, but opinions on this vary.
if self.omega_lambda == 0.0:
- mylog.info("Omega Lambda is 0.0, so we are turning off Cosmology.")
+ only_on_root(mylog.info, "Omega Lambda is 0.0, so we are turning off Cosmology.")
self.hubble_constant = 1.0 # So that scaling comes out correct
self.cosmological_simulation = 0
self.current_redshift = 0.0
@@ -183,8 +184,8 @@
cosmo = Cosmology(self.hubble_constant,
self.omega_matter, self.omega_lambda)
self.current_time = cosmo.hubble_time(self.current_redshift)
- mylog.info("Calculating time from %0.3e to be %0.3e seconds",
- hvals["Time"], self.current_time)
+ only_on_root(mylog.info, "Calculating time from %0.3e to be %0.3e seconds",
+ hvals["Time"], self.current_time)
self.parameters = hvals
prefix = os.path.abspath(
@@ -202,10 +203,10 @@
# If no units passed in by user, set a sane default (Gadget-2 users guide).
if self._unit_base is None:
if self.cosmological_simulation == 1:
- mylog.info("Assuming length units are in kpc/h (comoving)")
+ only_on_root(mylog.info, "Assuming length units are in kpc/h (comoving)")
self._unit_base = dict(length = (1.0, "kpccm/h"))
else:
- mylog.info("Assuming length units are in kpc (physical)")
+ only_on_root(mylog.info, "Assuming length units are in kpc (physical)")
self._unit_base = dict(length = (1.0, "kpc"))
# If units passed in by user, decide what to do about
diff -r 7c6757ed3b13413b15bb2a951e97da44524c2cbb -r 9fbf8e52001af6f02cdd9c0dfc6c2e413a4321ee yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -32,10 +32,14 @@
from yt.frontends.gadget_fof.fields import \
GadgetFOFFieldInfo, \
GadgetFOFHaloFieldInfo
+from yt.funcs import \
+ only_on_root
from yt.geometry.particle_geometry_handler import \
ParticleIndex
from yt.utilities.cosmology import \
Cosmology
+from yt.utilities.exceptions import \
+ YTException
from yt.utilities.logger import ytLogger as \
mylog
@@ -213,7 +217,7 @@
def _set_code_unit_attributes(self):
# Set a sane default for cosmological simulations.
if self._unit_base is None and self.cosmological_simulation == 1:
- mylog.info("Assuming length units are in Mpc/h (comoving)")
+ only_on_root(mylog.info, "Assuming length units are in Mpc/h (comoving)")
self._unit_base = dict(length = (1.0, "Mpccm/h"))
# The other same defaults we will use from the standard Gadget
# defaults.
diff -r 7c6757ed3b13413b15bb2a951e97da44524c2cbb -r 9fbf8e52001af6f02cdd9c0dfc6c2e413a4321ee yt/frontends/owls_subfind/data_structures.py
--- a/yt/frontends/owls_subfind/data_structures.py
+++ b/yt/frontends/owls_subfind/data_structures.py
@@ -24,6 +24,7 @@
from .fields import \
OWLSSubfindFieldInfo
+from yt.funcs import only_on_root
from yt.utilities.exceptions import \
YTException
from yt.utilities.logger import ytLogger as \
@@ -157,7 +158,7 @@
def _set_code_unit_attributes(self):
# Set a sane default for cosmological simulations.
if self._unit_base is None and self.cosmological_simulation == 1:
- mylog.info("Assuming length units are in Mpc/h (comoving)")
+ only_on_root(mylog.info, "Assuming length units are in Mpc/h (comoving)")
self._unit_base = dict(length = (1.0, "Mpccm/h"))
# The other same defaults we will use from the standard Gadget
# defaults.
diff -r 7c6757ed3b13413b15bb2a951e97da44524c2cbb -r 9fbf8e52001af6f02cdd9c0dfc6c2e413a4321ee yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -112,7 +112,7 @@
pu = ParticleUnion("all", list(self.particle_types_raw))
self.add_particle_union(pu)
self.field_info.setup_extra_union_fields()
- mylog.info("Loading field plugins.")
+ mylog.debug("Loading field plugins.")
self.field_info.load_all_plugins()
deps, unloaded = self.field_info.check_derived_fields()
self.field_dependencies.update(deps)
diff -r 7c6757ed3b13413b15bb2a951e97da44524c2cbb -r 9fbf8e52001af6f02cdd9c0dfc6c2e413a4321ee yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -18,6 +18,7 @@
import os
import weakref
+from yt.funcs import only_on_root
from yt.utilities.logger import ytLogger as mylog
from yt.data_objects.octree_subset import ParticleOctreeSubset
from yt.geometry.geometry_handler import Index, YTDataChunk
@@ -67,7 +68,7 @@
[1, 1, 1], ds.domain_left_edge, ds.domain_right_edge,
over_refine = ds.over_refine_factor)
self.oct_handler.n_ref = ds.n_ref
- mylog.info("Allocating for %0.3e particles", self.total_particles)
+ only_on_root(mylog.info, "Allocating for %0.3e particles", self.total_particles)
# No more than 256^3 in the region finder.
N = min(len(self.data_files), 256)
self.regions = ParticleRegions(
@@ -77,7 +78,7 @@
self.oct_handler.finalize()
self.max_level = self.oct_handler.max_level
tot = sum(self.oct_handler.recursively_count().values())
- mylog.info("Identified %0.3e octs", tot)
+ only_on_root(mylog.info, "Identified %0.3e octs", tot)
def _initialize_indices(self):
# This will be replaced with a parallel-aware iteration step.
diff -r 7c6757ed3b13413b15bb2a951e97da44524c2cbb -r 9fbf8e52001af6f02cdd9c0dfc6c2e413a4321ee yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -31,7 +31,12 @@
from numpy.testing import assert_string_equal # NOQA
from numpy.testing import assert_array_almost_equal_nulp # NOQA
from numpy.testing import assert_allclose, assert_raises # NOQA
-from nose.tools import assert_true, assert_less_equal # NOQA
+try:
+ from nose.tools import assert_true, assert_less_equal # NOQA
+except ImportError:
+ # This means nose isn't installed, so the tests can't run and it's ok
+ # to not import these functions
+ pass
from yt.convenience import load
from yt.units.yt_array import YTArray, YTQuantity
from yt.utilities.exceptions import YTUnitOperationError
@@ -779,11 +784,9 @@
def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False,
call_pdb = False):
- import nose
- import os
+ from yt.utilities.on_demand_imports import _nose
import sys
- import yt
- from yt.funcs import mylog
+ from yt.utilities.logger import ytLogger as mylog
orig_level = mylog.getEffectiveLevel()
mylog.setLevel(50)
nose_argv = sys.argv
@@ -797,7 +800,7 @@
if answer_big_data:
nose_argv.append('--answer-big-data')
initial_dir = os.getcwd()
- yt_file = os.path.abspath(yt.__file__)
+ yt_file = os.path.abspath(__file__)
yt_dir = os.path.dirname(yt_file)
if os.path.samefile(os.path.dirname(yt_dir), initial_dir):
# Provide a nice error message to work around nose bug
@@ -815,7 +818,7 @@
)
os.chdir(yt_dir)
try:
- nose.run(argv=nose_argv)
+ _nose.run(argv=nose_argv)
finally:
os.chdir(initial_dir)
mylog.setLevel(orig_level)
diff -r 7c6757ed3b13413b15bb2a951e97da44524c2cbb -r 9fbf8e52001af6f02cdd9c0dfc6c2e413a4321ee yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -25,6 +25,7 @@
from yt.testing import \
fake_random_ds, assert_allclose_units, \
assert_almost_equal
+from yt.units.unit_registry import UnitRegistry
# dimensions
from yt.units.dimensions import \
@@ -474,3 +475,10 @@
test_unit = Unit('cm**-3', base_value=1.0, registry=ds.unit_registry)
assert_equal(test_unit.latex_repr, '\\frac{1}{\\rm{cm}^{3}}')
+
+def test_registry_json():
+ reg = UnitRegistry()
+ json_reg = reg.to_json()
+ unserialized_reg = UnitRegistry.from_json(json_reg)
+
+ assert_equal(reg.lut, unserialized_reg.lut)
diff -r 7c6757ed3b13413b15bb2a951e97da44524c2cbb -r 9fbf8e52001af6f02cdd9c0dfc6c2e413a4321ee yt/units/unit_registry.py
--- a/yt/units/unit_registry.py
+++ b/yt/units/unit_registry.py
@@ -12,8 +12,22 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
+import json
+import re
+
+from distutils.version import LooseVersion
from yt.units.unit_lookup_table import \
default_unit_symbol_lut
+from yt.extern import six
+from sympy import \
+ sympify, \
+ srepr, \
+ __version__ as sympy_version
+
+SYMPY_VERSION = LooseVersion(sympy_version)
+
+def positive_symbol_replacer(match):
+ return match.group().replace(')\')', ')\', positive=True)')
class SymbolNotFoundError(Exception):
pass
@@ -102,3 +116,34 @@
"""
return self.lut.keys()
+
+ def to_json(self):
+ """
+ Returns a json-serialized version of the unit registry
+ """
+ sanitized_lut = {}
+ for k, v in six.iteritems(self.lut):
+ san_v = list(v)
+ repr_dims = srepr(v[1])
+ if SYMPY_VERSION < LooseVersion("1.0.0"):
+ # see https://github.com/sympy/sympy/issues/6131
+ repr_dims = re.sub("Symbol\('\([a-z_]*\)'\)",
+ positive_symbol_replacer, repr_dims)
+ san_v[1] = repr_dims
+ sanitized_lut[k] = tuple(san_v)
+
+ return json.dumps(sanitized_lut)
+
+ @classmethod
+ def from_json(cls, json_text):
+ """
+ Returns a UnitRegistry object from a json-serialized unit registry
+ """
+ data = json.loads(json_text)
+ lut = {}
+ for k, v in six.iteritems(data):
+ unsan_v = list(v)
+ unsan_v[1] = sympify(v[1])
+ lut[k] = tuple(unsan_v)
+
+ return cls(lut=lut, add_default_symbols=False)
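A round-trip sketch of the new serialization, mirroring the test added in this changeset:

    from yt.units.unit_registry import UnitRegistry

    reg = UnitRegistry()
    json_text = reg.to_json()  # sympy dimensions stored as srepr strings
    restored = UnitRegistry.from_json(json_text)
    assert reg.lut == restored.lut  # lookup table survives the round trip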
diff -r 7c6757ed3b13413b15bb2a951e97da44524c2cbb -r 9fbf8e52001af6f02cdd9c0dfc6c2e413a4321ee yt/utilities/on_demand_imports.py
--- a/yt/utilities/on_demand_imports.py
+++ b/yt/utilities/on_demand_imports.py
@@ -13,14 +13,20 @@
class NotAModule(object):
"""
A class to implement an informative error message that will be outputted if
- someone tries to use an on-demand import without having the requisite package installed.
+ someone tries to use an on-demand import without having the requisite
+ package installed.
"""
def __init__(self, pkg_name):
self.pkg_name = pkg_name
+ self.error = ImportError(
+ "This functionality requires the %s "
+ "package to be installed." % self.pkg_name)
def __getattr__(self, item):
- raise ImportError("This functionality requires the %s package to be installed."
- % self.pkg_name)
+ raise self.error
+
+ def __call__(self, *args, **kwargs):
+ raise self.error
class astropy_imports:
_name = "astropy"
@@ -272,3 +278,18 @@
return self._version
_h5py = h5py_imports()
+
+class nose_imports:
+ _name = "nose"
+ _run = None
+ @property
+ def run(self):
+ if self._run is None:
+ try:
+ from nose import run
+ except ImportError:
+ run = NotAModule(self._name)
+ self._run = run
+ return self._run
+
+_nose = nose_imports()
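A sketch of the NotAModule behavior extended above: the ImportError is deferred until the missing package is actually touched, and the new __call__ makes a stored callable (such as _nose.run when nose is not installed) fail the same way:

    from yt.utilities.on_demand_imports import NotAModule

    fake = NotAModule("nose")
    try:
        fake.run  # attribute access raises ImportError
    except ImportError as err:
        print(err)
    try:
        fake()  # invoking the placeholder itself now raises too
    except ImportError as err:
        print(err)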
https://bitbucket.org/yt_analysis/yt/commits/39e17fbf3a8f/
Changeset: 39e17fbf3a8f
Branch: yt
User: brittonsmith
Date: 2016-01-27 18:06:07+00:00
Summary: Merging.
Affected #: 6 files
diff -r 440902ffb795b23c044a09e96c3d1f2e1539273c -r 39e17fbf3a8ff16dcf79ed51b15437a5f2f23a4c doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1232,10 +1232,9 @@
yt has support for reading halo catalogs produced by Rockstar and the inline
FOF/SUBFIND halo finders of Gadget and OWLS. The halo catalogs are treated as
-particle datasets where each particle represents a single halo. At this time,
-yt does not have the ability to load the member particles for a given halo.
-However, once loaded, further halo analysis can be performed using
-:ref:`halo_catalog`.
+particle datasets where each particle represents a single halo. Member particles
+for individual halos can be accessed through halo data containers. Further halo
+analysis can be performed using :ref:`halo_catalog`.
In the case where halo catalogs are written to multiple files, one must only
give the path to one of them.
@@ -1269,11 +1268,39 @@
# x component of the spin
print ad["Subhalo", "SubhaloSpin_0"]
+Halo member particles are accessed by creating halo data containers with the
+type of halo ("Group" or "Subhalo") and the halo id. Scalar values for halos
+can be accessed in the same way. Halos also have mass, position, and velocity
+attributes.
+
+.. code-block:: python
+
+ halo = ds.halo("Group", 0)
+ # member particles for this halo
+ print halo["member_ids"]
+ # halo virial radius
+ print halo["Group_R_Crit200"]
+ # halo mass
+ print halo.mass
+
+Subhalo containers can be created using either their absolute ids or their
+subhalo ids.
+
+.. code-block:: python
+
+ # first subhalo of the first halo
+ subhalo = ds.halo("Subhalo", (0, 0))
+ # this subhalo's absolute id
+ print subhalo.group_identifier
+ # member particles
+ print subhalo["member_ids"]
+
OWLS FOF/SUBFIND
^^^^^^^^^^^^^^^^
OWLS halo catalogs have a very similar structure to regular Gadget halo catalogs.
-The two field types are "FOF" and "SUBFIND".
+The two field types are "FOF" and "SUBFIND". At this time, halo member particles
+cannot be loaded.
.. code-block:: python
diff -r 440902ffb795b23c044a09e96c3d1f2e1539273c -r 39e17fbf3a8ff16dcf79ed51b15437a5f2f23a4c yt/frontends/gadget_fof/api.py
--- a/yt/frontends/gadget_fof/api.py
+++ b/yt/frontends/gadget_fof/api.py
@@ -15,12 +15,19 @@
#-----------------------------------------------------------------------------
from .data_structures import \
- GadgetFOFDataset
+ GadgetFOFParticleIndex, \
+ GadgetFOFHDF5File, \
+ GadgetFOFDataset, \
+ GadgetFOFHaloParticleIndex, \
+ GadgetFOFHaloDataset, \
+    GadgetFOFHaloContainer
from .io import \
- IOHandlerGadgetFOFHDF5
+ IOHandlerGadgetFOFHDF5, \
+ IOHandlerGadgetFOFHaloHDF5
from .fields import \
- GadgetFOFFieldInfo
+ GadgetFOFFieldInfo, \
+ GadgetFOFHaloFieldInfo
from . import tests
diff -r 440902ffb795b23c044a09e96c3d1f2e1539273c -r 39e17fbf3a8ff16dcf79ed51b15437a5f2f23a4c yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -15,35 +15,46 @@
#-----------------------------------------------------------------------------
from collections import defaultdict
+from functools import partial
from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import stat
-import glob
import os
+import weakref
-from yt.funcs import only_on_root
+from yt.data_objects.data_containers import \
+ YTSelectionContainer
+from yt.data_objects.static_output import \
+ Dataset, \
+ ParticleFile
+from yt.frontends.gadget.data_structures import \
+ _fix_unit_ordering
from yt.frontends.gadget_fof.fields import \
- GadgetFOFFieldInfo
-
+ GadgetFOFFieldInfo, \
+ GadgetFOFHaloFieldInfo
+from yt.funcs import \
+ only_on_root
+from yt.geometry.particle_geometry_handler import \
+ ParticleIndex
from yt.utilities.cosmology import \
Cosmology
from yt.utilities.exceptions import \
YTException
from yt.utilities.logger import ytLogger as \
mylog
-from yt.geometry.particle_geometry_handler import \
- ParticleIndex
-from yt.data_objects.static_output import \
- Dataset, \
- ParticleFile
-from yt.frontends.gadget.data_structures import \
- _fix_unit_ordering
-
class GadgetFOFParticleIndex(ParticleIndex):
def __init__(self, ds, dataset_type):
super(GadgetFOFParticleIndex, self).__init__(ds, dataset_type)
+ def _calculate_particle_count(self):
+ """
+ Calculate the total number of each type of particle.
+ """
+ self.particle_count = \
+ dict([(ptype, sum([d.total_particles[ptype] for d in self.data_files]))
+ for ptype in self.ds.particle_types_raw])
+
def _calculate_particle_index_starts(self):
# Halo indices are not saved in the file, so we must count by hand.
# File 0 has halos 0 to N_0 - 1, file 1 has halos N_0 to N_0 + N_1 - 1, etc.
@@ -57,9 +68,14 @@
particle_count[ptype] += data_file.total_particles[ptype]
offset_count += data_file.total_offset
+ self._halo_index_start = \
+ dict([(ptype, np.array([data_file.index_start[ptype]
+ for data_file in self.data_files]))
+ for ptype in self.ds.particle_types_raw])
+
def _calculate_file_offset_map(self):
- # After the FOF is performed, a load-balancing step redistributes halos
- # and then writes more fields. Here, for each file, we create a list of
+ # After the FOF is performed, a load-balancing step redistributes halos
+ # and then writes more fields. Here, for each file, we create a list of
# files which contain the rest of the redistributed particles.
ifof = np.array([data_file.total_particles["Group"]
for data_file in self.data_files])
@@ -73,40 +89,54 @@
data_file.offset_files = self.data_files[istart[i]: iend[i] + 1]
def _detect_output_fields(self):
- # TODO: Add additional fields
- dsl = []
+ field_list = []
units = {}
- for dom in self.data_files:
- fl, _units = self.io._identify_fields(dom)
+ found_fields = \
+ dict([(ptype, False)
+ for ptype, pnum in self.particle_count.items()
+ if pnum > 0])
+
+ for data_file in self.data_files:
+ fl, _units = self.io._identify_fields(data_file)
units.update(_units)
- dom._calculate_offsets(fl)
- for f in fl:
- if f not in dsl: dsl.append(f)
- self.field_list = dsl
+ field_list.extend([f for f in fl if f not in field_list])
+ for ptype in found_fields:
+ found_fields[ptype] |= data_file.total_particles[ptype]
+ if all(found_fields.values()): break
+
+ self.field_list = field_list
ds = self.dataset
- ds.particle_types = tuple(set(pt for pt, ds in dsl))
- # This is an attribute that means these particle types *actually*
- # exist. As in, they are real, in the dataset.
+ ds.particle_types = tuple(set(pt for pt, ds in field_list))
ds.field_units.update(units)
- ds.particle_types_raw = tuple(sorted(ds.particle_types))
-
+ ds.particle_types_raw = ds.particle_types
+
def _setup_geometry(self):
super(GadgetFOFParticleIndex, self)._setup_geometry()
+ self._calculate_particle_count()
self._calculate_particle_index_starts()
self._calculate_file_offset_map()
-
+
class GadgetFOFHDF5File(ParticleFile):
def __init__(self, ds, io, filename, file_id):
+ with h5py.File(filename, "r") as f:
+ self.header = \
+ dict((str(field), val)
+ for field, val in f["Header"].attrs.items())
+ self.group_length_sum = f["Group/GroupLen"].value.sum() \
+ if "Group/GroupLen" in f else 0
+ self.group_subs_sum = f["Group/GroupNsubs"].value.sum() \
+ if "Group/GroupNsubs" in f else 0
+ self.total_ids = self.header["Nids_ThisFile"]
+ self.total_particles = \
+ {"Group": self.header["Ngroups_ThisFile"],
+ "Subhalo": self.header["Nsubgroups_ThisFile"]}
+ self.total_offset = 0 # I think this is no longer needed
super(GadgetFOFHDF5File, self).__init__(ds, io, filename, file_id)
- with h5py.File(filename, "r") as f:
- self.header = dict((field, f.attrs[field]) \
- for field in f.attrs.keys())
-
+
class GadgetFOFDataset(Dataset):
_index_class = GadgetFOFParticleIndex
_file_class = GadgetFOFHDF5File
_field_info_class = GadgetFOFFieldInfo
- _suffix = ".hdf5"
def __init__(self, filename, dataset_type="gadget_fof_hdf5",
n_ref=16, over_refine_factor=1,
@@ -122,13 +152,36 @@
raise RuntimeError("units_override is not supported for GadgetFOFDataset. "+
"Use unit_base instead.")
super(GadgetFOFDataset, self).__init__(filename, dataset_type,
- units_override=units_override)
+ units_override=units_override)
+
+ def add_field(self, *args, **kwargs):
+ super(GadgetFOFDataset, self).add_field(*args, **kwargs)
+ self._halos_ds.add_field(*args, **kwargs)
+
+ @property
+ def halos_field_list(self):
+ return self._halos_ds.field_list
+
+ @property
+ def halos_derived_field_list(self):
+ return self._halos_ds.derived_field_list
+
+ _instantiated_halo_ds = None
+ @property
+ def _halos_ds(self):
+ if self._instantiated_halo_ds is None:
+ self._instantiated_halo_ds = GadgetFOFHaloDataset(self)
+ return self._instantiated_halo_ds
+
+ def _setup_classes(self):
+ super(GadgetFOFDataset, self)._setup_classes()
+        self.halo = partial(GadgetFOFHaloContainer, ds=self._halos_ds)
def _parse_parameter_file(self):
- handle = h5py.File(self.parameter_filename, mode="r")
- hvals = {}
- hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
- hvals["NumFiles"] = hvals["NumFiles"]
+ with h5py.File(self.parameter_filename,"r") as f:
+ self.parameters = \
+ dict((str(field), val)
+ for field, val in f["Header"].attrs.items())
self.dimensionality = 3
self.refine_by = 2
@@ -137,35 +190,29 @@
# Set standard values
self.domain_left_edge = np.zeros(3, "float64")
- self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
+ self.domain_right_edge = np.ones(3, "float64") * \
+ self.parameters["BoxSize"]
nz = 1 << self.over_refine_factor
self.domain_dimensions = np.ones(3, "int32") * nz
self.cosmological_simulation = 1
self.periodicity = (True, True, True)
- self.current_redshift = hvals["Redshift"]
- self.omega_lambda = hvals["OmegaLambda"]
- self.omega_matter = hvals["Omega0"]
- self.hubble_constant = hvals["HubbleParam"]
-
+ self.current_redshift = self.parameters["Redshift"]
+ self.omega_lambda = self.parameters["OmegaLambda"]
+ self.omega_matter = self.parameters["Omega0"]
+ self.hubble_constant = self.parameters["HubbleParam"]
cosmology = Cosmology(hubble_constant=self.hubble_constant,
omega_matter=self.omega_matter,
omega_lambda=self.omega_lambda)
self.current_time = cosmology.t_from_z(self.current_redshift)
- self.parameters = hvals
prefix = os.path.abspath(
os.path.join(os.path.dirname(self.parameter_filename),
os.path.basename(self.parameter_filename).split(".", 1)[0]))
-
suffix = self.parameter_filename.rsplit(".", 1)[-1]
self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix)
- self.file_count = len(glob.glob(prefix + "*" + self._suffix))
- if self.file_count == 0:
- raise YTException(message="No data files found.", ds=self)
+ self.file_count = self.parameters["NumFiles"]
self.particle_types = ("Group", "Subhalo")
self.particle_types_raw = ("Group", "Subhalo")
-
- handle.close()
def _set_code_unit_attributes(self):
# Set a sane default for cosmological simulations.
@@ -223,6 +270,9 @@
time_unit = (1., "s")
self.time_unit = self.quan(time_unit[0], time_unit[1])
+ def __repr__(self):
+ return self.basename.split(".", 1)[0]
+
@classmethod
def _is_valid(self, *args, **kwargs):
need_groups = ['Group', 'Header', 'Subhalo']
@@ -237,3 +287,366 @@
valid = False
pass
return valid
+
+class GadgetFOFHaloParticleIndex(GadgetFOFParticleIndex):
+ def __init__(self, ds, dataset_type):
+ self.real_ds = weakref.proxy(ds.real_ds)
+ super(GadgetFOFHaloParticleIndex, self).__init__(ds, dataset_type)
+
+ def _setup_geometry(self):
+ self._setup_data_io()
+
+ if self.real_ds._instantiated_index is None:
+ template = self.real_ds.filename_template
+ ndoms = self.real_ds.file_count
+ cls = self.real_ds._file_class
+ self.data_files = \
+ [cls(self.dataset, self.io, template % {'num':i}, i)
+ for i in range(ndoms)]
+ else:
+ self.data_files = self.real_ds.index.data_files
+
+ self._calculate_particle_index_starts()
+ self._calculate_particle_count()
+ self._create_halo_id_table()
+
+ def _create_halo_id_table(self):
+ """
+ Create a list of halo start ids so we know which file
+ contains particles for a given halo. Note, the halo ids
+ are distributed over all files and so the ids for a given
+ halo are likely stored in a different file than the halo
+ itself.
+ """
+
+ self._halo_id_number = np.array([data_file.total_ids
+ for data_file in self.data_files])
+ self._halo_id_end = self._halo_id_number.cumsum()
+ self._halo_id_start = self._halo_id_end - self._halo_id_number
+
+ self._group_length_sum = \
+ np.array([data_file.group_length_sum
+ for data_file in self.data_files])
+
+ def _detect_output_fields(self):
+ field_list = []
+ scalar_field_list = []
+ units = {}
+ found_fields = \
+ dict([(ptype, False)
+ for ptype, pnum in self.particle_count.items()
+ if pnum > 0])
+
+ for data_file in self.data_files:
+ fl, sl, _units = self.io._identify_fields(data_file)
+ units.update(_units)
+ field_list.extend([f for f in fl
+ if f not in field_list])
+ scalar_field_list.extend([f for f in sl
+ if f not in scalar_field_list])
+ for ptype in found_fields:
+ found_fields[ptype] |= data_file.total_particles[ptype]
+ if all(found_fields.values()): break
+
+ self.field_list = field_list
+ self.scalar_field_list = scalar_field_list
+ ds = self.dataset
+ ds.scalar_field_list = scalar_field_list
+ ds.particle_types = tuple(set(pt for pt, ds in field_list))
+ ds.field_units.update(units)
+ ds.particle_types_raw = ds.particle_types
+
+ def _identify_base_chunk(self, dobj):
+ pass
+
+ def _read_particle_fields(self, fields, dobj, chunk = None):
+ if len(fields) == 0: return {}, []
+ fields_to_read, fields_to_generate = self._split_fields(fields)
+ if len(fields_to_read) == 0:
+ return {}, fields_to_generate
+ fields_to_return = self.io._read_particle_selection(
+ dobj, fields_to_read)
+ return fields_to_return, fields_to_generate
+
+ def _get_halo_file_indices(self, ptype, identifiers):
+ return np.digitize(identifiers,
+ self._halo_index_start[ptype], right=False) - 1
+
+ def _get_halo_scalar_index(self, ptype, identifier):
+ i_scalar = self._get_halo_file_indices(ptype, [identifier])[0]
+ scalar_index = identifier - self._halo_index_start[ptype][i_scalar]
+ return scalar_index
+
+ def _get_halo_values(self, ptype, identifiers, fields,
+ f=None):
+ """
+ Get field values for halos. IDs are likely to be
+ sequential (or at least monotonic), but not necessarily
+ all within the same file.
+
+ This does not do much to minimize file i/o, but with
+ halos randomly distributed across files, there's not
+ much more we can do.
+ """
+
+ # if a file is already open, don't open it again
+ filename = None if f is None \
+ else f.filename
+
+ data = defaultdict(lambda: np.empty(identifiers.size))
+ i_scalars = self._get_halo_file_indices(ptype, identifiers)
+ for i_scalar in np.unique(i_scalars):
+ target = i_scalars == i_scalar
+ scalar_indices = identifiers - \
+ self._halo_index_start[ptype][i_scalar]
+
+ # only open file if it's not already open
+ my_f = f if self.data_files[i_scalar].filename == filename \
+ else h5py.File(self.data_files[i_scalar].filename, "r")
+
+ for field in fields:
+ data[field][target] = \
+ my_f[os.path.join(ptype, field)].value[scalar_indices[target]]
+
+ if self.data_files[i_scalar].filename != filename: my_f.close()
+
+ return data
+
+class GadgetFOFHaloDataset(Dataset):
+ _index_class = GadgetFOFHaloParticleIndex
+ _file_class = GadgetFOFHDF5File
+ _field_info_class = GadgetFOFHaloFieldInfo
+
+ def __init__(self, ds, dataset_type="gadget_fof_halo_hdf5"):
+ self.real_ds = ds
+ self.particle_types_raw = self.real_ds.particle_types_raw
+ self.particle_types = self.particle_types_raw
+
+ super(GadgetFOFHaloDataset, self).__init__(
+ self.real_ds.parameter_filename, dataset_type)
+
+ def print_key_parameters(self):
+ pass
+
+ def _set_derived_attrs(self):
+ pass
+
+ def _parse_parameter_file(self):
+ for attr in ["cosmological_simulation", "cosmology",
+        *args and **kwargs are transmitted to DataFrameGroupBy.apply()
+ "dimensionality", "domain_dimensions",
+ "domain_left_edge", "domain_right_edge",
+ "domain_width", "hubble_constant",
+ "omega_lambda", "omega_matter",
+ "unique_identifier"]:
+ setattr(self, attr, getattr(self.real_ds, attr))
+
+ def set_code_units(self):
+ for unit in ["length", "time", "mass",
+ "velocity", "magnetic", "temperature"]:
+ my_unit = "%s_unit" % unit
+ setattr(self, my_unit, getattr(self.real_ds, my_unit, None))
+ self.unit_registry = self.real_ds.unit_registry
+
+ def __repr__(self):
+ return "%s" % self.real_ds
+
+ def _setup_classes(self):
+ self.objects = []
+
+class GadgetFOFHaloContainer(YTSelectionContainer):
+ """
+ Create a data container to get member particles and individual
+ values from halos and subhalos. Halo mass, position, and
+ velocity are set as attributes. Halo IDs are accessible
+ through the field, "member_ids". Other fields that are one
+ value per halo are accessible as normal. The field list for
+ halo objects can be seen in `ds.halos_field_list`.
+
+ Parameters
+ ----------
+ ptype : string
+ The type of halo, either "Group" for the main halo or
+ "Subhalo" for subhalos.
+ particle_identifier : int or tuple of (int, int)
+ The halo or subhalo id. If requesting a subhalo, the id
+ can also be given as a tuple of the main halo id and
+ subgroup id, such as (1, 4) for subgroup 4 of halo 1.
+
+ Halo Container Attributes
+ -------------------------
+ particle_identifier : int
+ The id of the halo or subhalo.
+ group_identifier : int
+ For subhalos, the id of the enclosing halo.
+ subgroup_identifier : int
+ For subhalos, the relative id of the subhalo within
+ the enclosing halo.
+ particle_number : int
+ Number of particles in the halo.
+ mass : float
+ Halo mass.
+ position : array of floats
+ Halo position.
+ velocity : array of floats
+ Halo velocity.
+
+ Relevant Fields
+ ---------------
+ particle_number :
+ number of particles
+ subhalo_number :
+ number of subhalos
+ group_identifier :
+ id of parent group for subhalos
+
+ Examples
+ --------
+
+ >>> import yt
+ >>> ds = yt.load("gadget_halos/data/groups_298/fof_subhalo_tab_298.0.hdf5")
+ >>>
+ >>> halo = ds.halo("Group", 0)
+ >>> print halo.mass
+ 13256.5517578 code_mass
+ >>> print halo.position
+ [ 16.18603706 6.95965052 12.52694607] code_length
+ >>> print halo.velocity
+ [ 6943694.22793569 -762788.90647454 -794749.63819757] cm/s
+ >>> print halo["Group_R_Crit200"]
+ [ 0.79668683] code_length
+ >>>
+ >>> # particle ids for this halo
+ >>> print halo["member_ids"]
+ [ 723631. 690744. 854212. ..., 608589. 905551. 1147449.] dimensionless
+ >>>
+ >>> # get the first subhalo of this halo
+ >>> subhalo = ds.halo("Subhalo", (0, 0))
+ >>> print subhalo["member_ids"]
+ [ 723631. 690744. 854212. ..., 808362. 956359. 1248821.] dimensionless
+
+ """
+
+ _type_name = "halo"
+ _con_args = ("ptype", "particle_identifier")
+ _spatial = False
+
+ def __init__(self, ptype, particle_identifier, ds=None):
+ if ptype not in ds.particle_types_raw:
+ raise RuntimeError("Possible halo types are %s, supplied \"%s\"." %
+ (ds.particle_types_raw, ptype))
+
+ self.ptype = ptype
+ self._current_particle_type = ptype
+        super(GadgetFOFHaloContainer, self).__init__(ds, {})
+
+ if ptype == "Subhalo" and isinstance(particle_identifier, tuple):
+ self.group_identifier, self.subgroup_identifier = \
+ particle_identifier
+ my_data = self.index._get_halo_values(
+ "Group", np.array([self.group_identifier]),
+ ["GroupFirstSub"])
+ self.particle_identifier = \
+ np.int64(my_data["GroupFirstSub"][0] + self.subgroup_identifier)
+ else:
+ self.particle_identifier = particle_identifier
+
+ if self.particle_identifier >= self.index.particle_count[ptype]:
+ raise RuntimeError("%s %d requested, but only %d %s objects exist." %
+ (ptype, particle_identifier,
+ self.index.particle_count[ptype], ptype))
+
+ # Find the file that has the scalar values for this halo.
+ i_scalar = self.index._get_halo_file_indices(
+ ptype, [self.particle_identifier])[0]
+ self.scalar_data_file = self.index.data_files[i_scalar]
+
+ # index within halo arrays that corresponds to this halo
+ self.scalar_index = self.index._get_halo_scalar_index(
+ ptype, self.particle_identifier)
+
+ halo_fields = ["%sLen" % ptype]
+ if ptype == "Subhalo": halo_fields.append("SubhaloGrNr")
+ my_data = self.index._get_halo_values(
+ ptype, np.array([self.particle_identifier]),
+ halo_fields)
+ self.particle_number = np.int64(my_data["%sLen" % ptype][0])
+
+ if ptype == "Group":
+ self.group_identifier = self.particle_identifier
+ id_offset = 0
+ # index of file that has scalar values for the group
+ g_scalar = i_scalar
+ group_index = self.scalar_index
+
+ # If a subhalo, find the index of the parent.
+ elif ptype == "Subhalo":
+ self.group_identifier = np.int64(my_data["SubhaloGrNr"][0])
+
+ # Find the file that has the scalar values for the parent group.
+ g_scalar = self.index._get_halo_file_indices(
+ "Group", [self.group_identifier])[0]
+
+        # index within halo arrays that corresponds to the parent group
+ group_index = self.index._get_halo_scalar_index(
+ "Group", self.group_identifier)
+
+ my_data = self.index._get_halo_values(
+ "Group", np.array([self.group_identifier]),
+ ["GroupNsubs", "GroupFirstSub"])
+ self.subgroup_identifier = self.particle_identifier - \
+ np.int64(my_data["GroupFirstSub"][0])
+ parent_subhalos = my_data["GroupNsubs"][0]
+
+ mylog.debug("Subhalo %d is subgroup %s of %d in group %d." % \
+ (self.particle_identifier, self.subgroup_identifier,
+ parent_subhalos, self.group_identifier))
+
+ # ids of the sibling subhalos that come before this one
+ if self.subgroup_identifier > 0:
+ sub_ids = np.arange(
+ self.particle_identifier - self.subgroup_identifier,
+ self.particle_identifier)
+ my_data = self.index._get_halo_values(
+ "Subhalo", sub_ids, ["SubhaloLen"])
+ id_offset = my_data["SubhaloLen"].sum(dtype=np.int64)
+ else:
+ id_offset = 0
+
+ # Calculate the starting index for the member particles.
+ # First, add up all the particles in the earlier files.
+ all_id_start = self.index._group_length_sum[:g_scalar].sum(dtype=np.int64)
+
+ # Now add the halos in this file that come before.
+ with h5py.File(self.index.data_files[g_scalar].filename, "r") as f:
+ all_id_start += f["Group"]["GroupLen"][:group_index].sum(dtype=np.int64)
+
+ # Add the subhalo offset.
+ all_id_start += id_offset
+
+ # indices of first and last files containing member particles
+ i_start = np.digitize([all_id_start],
+ self.index._halo_id_start,
+ right=False)[0] - 1
+ i_end = np.digitize([all_id_start+self.particle_number],
+ self.index._halo_id_end,
+ right=True)[0]
+ self.field_data_files = self.index.data_files[i_start:i_end+1]
+
+ # starting and ending indices for each file containing particles
+ self.field_data_start = \
+ (all_id_start -
+ self.index._halo_id_start[i_start:i_end+1]).clip(min=0)
+ self.field_data_start = self.field_data_start.astype(np.int64)
+ self.field_data_end = \
+ (all_id_start + self.particle_number -
+ self.index._halo_id_start[i_start:i_end+1]).clip(
+ max=self.index._halo_id_number[i_start:i_end+1])
+ self.field_data_end = self.field_data_end.astype(np.int64)
+
+ for attr in ["mass", "position", "velocity"]:
+ setattr(self, attr, self[self.ptype, "particle_%s" % attr][0])
+
+ def __repr__(self):
+ return "%s_%s_%09d" % \
+ (self.ds, self.ptype, self.particle_identifier)
diff -r 440902ffb795b23c044a09e96c3d1f2e1539273c -r 39e17fbf3a8ff16dcf79ed51b15437a5f2f23a4c yt/frontends/gadget_fof/fields.py
--- a/yt/frontends/gadget_fof/fields.py
+++ b/yt/frontends/gadget_fof/fields.py
@@ -21,28 +21,70 @@
p_units = "code_length"
v_units = "code_velocity"
+_pnums = 6
+_type_fields = \
+ tuple(("%s%sType_%d" % (ptype, field, pnum), (units, [], None))
+ for pnum in range(_pnums)
+ for field, units in (("Mass", m_units), ("Len", p_units))
+ for ptype in ("Group", "Subhalo"))
+_sub_type_fields = \
+ tuple(("Subhalo%sType_%d" % (field, pnum), (units, [], None))
+ for pnum in range(_pnums)
+ for field, units in (("HalfmassRad", p_units),
+ ("MassInHalfRad", m_units),
+ ("MassInMaxRad", m_units),
+ ("MassInRad", m_units)))
+
+_particle_fields = (
+ ("GroupPos_0", (p_units, ["Group", "particle_position_x"], None)),
+ ("GroupPos_1", (p_units, ["Group", "particle_position_y"], None)),
+ ("GroupPos_2", (p_units, ["Group", "particle_position_z"], None)),
+ ("GroupVel_0", (v_units, ["Group", "particle_velocity_x"], None)),
+ ("GroupVel_1", (v_units, ["Group", "particle_velocity_y"], None)),
+ ("GroupVel_2", (v_units, ["Group", "particle_velocity_z"], None)),
+ ("GroupMass", (m_units, ["Group", "particle_mass"], None)),
+ ("GroupLen", ("", ["Group", "particle_number"], None)),
+ ("GroupNsubs", ("", ["Group", "subhalo_number"], None)),
+ ("GroupFirstSub", ("", [], None)),
+ ("Group_M_Crit200", (m_units, [], None)),
+ ("Group_M_Crit500", (m_units, [], None)),
+ ("Group_M_Mean200", (m_units, [], None)),
+ ("Group_M_TopHat200", (m_units, [], None)),
+ ("Group_R_Crit200", (p_units, [], None)),
+ ("Group_R_Crit500", (p_units, [], None)),
+ ("Group_R_Mean200", (p_units, [], None)),
+ ("Group_R_TopHat200", (p_units, [], None)),
+ ("SubhaloPos_0", (p_units, ["Subhalo", "particle_position_x"], None)),
+ ("SubhaloPos_1", (p_units, ["Subhalo", "particle_position_y"], None)),
+ ("SubhaloPos_2", (p_units, ["Subhalo", "particle_position_z"], None)),
+ ("SubhaloVel_0", (v_units, ["Subhalo", "particle_velocity_x"], None)),
+ ("SubhaloVel_1", (v_units, ["Subhalo", "particle_velocity_y"], None)),
+ ("SubhaloVel_2", (v_units, ["Subhalo", "particle_velocity_z"], None)),
+ ("SubhaloMass", (m_units, ["Subhalo", "particle_mass"], None)),
+ ("SubhaloLen", ("", ["Subhalo", "particle_number"], None)),
+ ("SubhaloCM_0", (p_units, ["Subhalo", "center_of_mass_x"], None)),
+ ("SubhaloCM_1", (p_units, ["Subhalo", "center_of_mass_y"], None)),
+ ("SubhaloCM_2", (p_units, ["Subhalo", "center_of_mass_z"], None)),
+ ("SubhaloSpin_0", ("", ["Subhalo", "spin_x"], None)),
+ ("SubhaloSpin_1", ("", ["Subhalo", "spin_y"], None)),
+ ("SubhaloSpin_2", ("", ["Subhalo", "spin_z"], None)),
+ ("SubhaloGrNr", ("", ["Subhalo", "group_identifier"], None)),
+ ("SubhaloHalfmassRad", (p_units, [], None)),
+ ("SubhaloIDMostbound", ("", [], None)),
+ ("SubhaloMassInHalfRad", (m_units, [], None)),
+ ("SubhaloMassInMaxRad", (m_units, [], None)),
+ ("SubhaloMassInRad", (m_units, [], None)),
+ ("SubhaloParent", ("", [], None)),
+ ("SubhaloVelDisp", (v_units, ["Subhalo", "velocity_dispersion"], None)),
+ ("SubhaloVmax", (v_units, [], None)),
+ ("SubhaloVmaxRad", (p_units, [], None)),
+) + _type_fields + _sub_type_fields
+
class GadgetFOFFieldInfo(FieldInfoContainer):
known_other_fields = (
)
- known_particle_fields = (
- ("GroupPos_0", (p_units, ["Group", "particle_position_x"], None)),
- ("GroupPos_1", (p_units, ["Group", "particle_position_y"], None)),
- ("GroupPos_2", (p_units, ["Group", "particle_position_z"], None)),
- ("GroupVel_0", (v_units, ["Group", "particle_velocity_x"], None)),
- ("GroupVel_1", (v_units, ["Group", "particle_velocity_y"], None)),
- ("GroupVel_2", (v_units, ["Group", "particle_velocity_z"], None)),
- ("GroupMass", (m_units, ["Group", "particle_mass"], None)),
- ("GroupLen", ("", ["Group", "particle_number"], None)),
- ("SubhaloPos_0", (p_units, ["Subhalo", "particle_position_x"], None)),
- ("SubhaloPos_1", (p_units, ["Subhalo", "particle_position_y"], None)),
- ("SubhaloPos_2", (p_units, ["Subhalo", "particle_position_z"], None)),
- ("SubhaloVel_0", (v_units, ["Subhalo", "particle_velocity_x"], None)),
- ("SubhaloVel_1", (v_units, ["Subhalo", "particle_velocity_y"], None)),
- ("SubhaloVel_2", (v_units, ["Subhalo", "particle_velocity_z"], None)),
- ("SubhaloMass", (m_units, ["Subhalo", "particle_mass"], None)),
- ("SubhaloLen", ("", ["Subhalo", "particle_number"], None)),
- )
+ known_particle_fields = _particle_fields
# these are extra fields to be created for the "all" particle type
extra_union_fields = (
@@ -56,3 +98,10 @@
("", "particle_number"),
("", "particle_ones"),
)
+
+class GadgetFOFHaloFieldInfo(FieldInfoContainer):
+ known_other_fields = (
+ )
+
+ known_particle_fields = _particle_fields + \
+ (("ID", ("", ["member_ids"], None)),)
diff -r 440902ffb795b23c044a09e96c3d1f2e1539273c -r 39e17fbf3a8ff16dcf79ed51b15437a5f2f23a4c yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -14,16 +14,17 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
+from collections import defaultdict
from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
-from yt.utilities.exceptions import YTDomainOverflow
from yt.funcs import mylog
-
+from yt.utilities.exceptions import \
+ YTDomainOverflow
from yt.utilities.io_handler import \
BaseIOHandler
-
-from yt.utilities.lib.geometry_utils import compute_morton
+from yt.utilities.lib.geometry_utils import \
+ compute_morton
class IOHandlerGadgetFOFHDF5(BaseIOHandler):
_dataset_type = "gadget_fof_hdf5"
@@ -55,7 +56,8 @@
def _read_offset_particle_field(self, field, data_file, fh):
field_data = np.empty(data_file.total_particles["Group"], dtype="float64")
- fofindex = np.arange(data_file.total_particles["Group"]) + data_file.index_start["Group"]
+ fofindex = np.arange(data_file.total_particles["Group"]) + \
+ data_file.index_start["Group"]
for offset_file in data_file.offset_files:
if fh.filename == offset_file.filename:
ofh = fh
@@ -105,7 +107,6 @@
field_data = f[ptype][fname].value.astype("float64")
my_div = field_data.size / pcount
if my_div > 1:
- field_data = np.resize(field_data, (pcount, my_div))
findex = int(field[field.rfind("_") + 1:])
field_data = field_data[:, findex]
data = field_data[mask]
@@ -149,11 +150,7 @@
return morton
def _count_particles(self, data_file):
- with h5py.File(data_file.filename, "r") as f:
- pcount = {"Group": f["Header"].attrs["Ngroups_ThisFile"],
- "Subhalo": f["Header"].attrs["Nsubgroups_ThisFile"]}
- data_file.total_offset = 0 # need to figure out how subfind works here
- return pcount
+ return data_file.total_particles
def _identify_fields(self, data_file):
fields = []
@@ -169,6 +166,154 @@
self.offset_fields = self.offset_fields.union(set(my_offset_fields))
return fields, {}
+class IOHandlerGadgetFOFHaloHDF5(IOHandlerGadgetFOFHDF5):
+ _dataset_type = "gadget_fof_halo_hdf5"
+
+ def _read_particle_coords(self, chunks, ptf):
+ pass
+
+ def _read_particle_selection(self, dobj, fields):
+ rv = {}
+ ind = {}
+ # We first need a set of masks for each particle type
+ ptf = defaultdict(list) # ON-DISK TO READ
+ fsize = defaultdict(lambda: 0) # COUNT RV
+ field_maps = defaultdict(list) # ptypes -> fields
+ unions = self.ds.particle_unions
+ # What we need is a mapping from particle types to return types
+ for field in fields:
+ ftype, fname = field
+ fsize[field] = 0
+ # We should add a check for p.fparticle_unions or something here
+ if ftype in unions:
+ for pt in unions[ftype]:
+ ptf[pt].append(fname)
+ field_maps[pt, fname].append(field)
+ else:
+ ptf[ftype].append(fname)
+ field_maps[field].append(field)
+
+ # Now we allocate
+ psize = {dobj.ptype: dobj.particle_number}
+ for field in fields:
+ if field[0] in unions:
+ for pt in unions[field[0]]:
+ fsize[field] += psize.get(pt, 0)
+ else:
+ fsize[field] += psize.get(field[0], 0)
+ for field in fields:
+ if field[1] in self._vector_fields:
+ shape = (fsize[field], self._vector_fields[field[1]])
+ elif field[1] in self._array_fields:
+ shape = (fsize[field],)+self._array_fields[field[1]]
+ elif field in self.ds.scalar_field_list:
+ shape = (1,)
+ else:
+ shape = (fsize[field], )
+ rv[field] = np.empty(shape, dtype="float64")
+ ind[field] = 0
+ # Now we read.
+ for field_r, vals in self._read_particle_fields(dobj, ptf):
+ # Note that we now need to check the mappings
+ for field_f in field_maps[field_r]:
+ my_ind = ind[field_f]
+ rv[field_f][my_ind:my_ind + vals.shape[0],...] = vals
+ ind[field_f] += vals.shape[0]
+ # Now we need to truncate all our fields, since we allow for
+ # over-estimating.
+ for field_f in ind:
+ rv[field_f] = rv[field_f][:ind[field_f]]
+ return rv
+
+ def _read_scalar_fields(self, dobj, scalar_fields):
+ all_data = {}
+ if not scalar_fields: return all_data
+ pcount = 1
+ with h5py.File(dobj.scalar_data_file.filename, "r") as f:
+ for ptype, field_list in sorted(scalar_fields.items()):
+ for field in field_list:
+ if field == "particle_identifier":
+ field_data = \
+ np.arange(dobj.scalar_data_file.total_particles[ptype]) + \
+ dobj.scalar_data_file.index_start[ptype]
+ elif field in f[ptype]:
+ field_data = f[ptype][field].value.astype("float64")
+ else:
+ fname = field[:field.rfind("_")]
+ field_data = f[ptype][fname].value.astype("float64")
+ my_div = field_data.size / pcount
+ if my_div > 1:
+ findex = int(field[field.rfind("_") + 1:])
+ field_data = field_data[:, findex]
+ data = np.array([field_data[dobj.scalar_index]])
+ all_data[(ptype, field)] = data
+ return all_data
+
+ def _read_member_fields(self, dobj, member_fields):
+ all_data = defaultdict(lambda: np.empty(dobj.particle_number,
+ dtype=np.float64))
+ if not member_fields: return all_data
+ field_start = 0
+ for i, data_file in enumerate(dobj.field_data_files):
+ start_index = dobj.field_data_start[i]
+ end_index = dobj.field_data_end[i]
+ pcount = end_index - start_index
+ field_end = field_start + end_index - start_index
+ with h5py.File(data_file.filename, "r") as f:
+ for ptype, field_list in sorted(member_fields.items()):
+ for field in field_list:
+ field_data = all_data[(ptype, field)]
+ if field in f["IDs"]:
+ my_data = \
+ f["IDs"][field][start_index:end_index].astype("float64")
+ else:
+ fname = field[:field.rfind("_")]
+ my_data = \
+ f["IDs"][fname][start_index:end_index].astype("float64")
+ my_div = my_data.size / pcount
+ if my_div > 1:
+ findex = int(field[field.rfind("_") + 1:])
+ my_data = my_data[:, findex]
+ field_data[field_start:field_end] = my_data
+ field_start = field_end
+ return all_data
+
+ def _read_particle_fields(self, dobj, ptf):
+ # separate member particle fields from scalar fields
+ scalar_fields = defaultdict(list)
+ member_fields = defaultdict(list)
+ for ptype, field_list in sorted(ptf.items()):
+ for field in field_list:
+ if (ptype, field) in self.ds.scalar_field_list:
+ scalar_fields[ptype].append(field)
+ else:
+ member_fields[ptype].append(field)
+
+ all_data = self._read_scalar_fields(dobj, scalar_fields)
+ all_data.update(self._read_member_fields(dobj, member_fields))
+
+ for field, field_data in all_data.items():
+ yield field, field_data
+
+ def _identify_fields(self, data_file):
+ fields = []
+ scalar_fields = []
+ pcount = data_file.total_particles
+ if sum(pcount.values()) == 0: return fields, scalar_fields, {}
+ with h5py.File(data_file.filename, "r") as f:
+ for ptype in self.ds.particle_types_raw:
+ if data_file.total_particles[ptype] == 0: continue
+ fields.append((ptype, "particle_identifier"))
+ scalar_fields.append((ptype, "particle_identifier"))
+ my_fields, my_offset_fields = \
+ subfind_field_list(f[ptype], ptype, data_file.total_particles)
+ fields.extend(my_fields)
+ scalar_fields.extend(my_fields)
+
+ if "IDs" not in f: continue
+ fields.extend([(ptype, field) for field in f["IDs"]])
+ return fields, scalar_fields, {}
+
def subfind_field_list(fh, ptype, pcount):
fields = []
offset_fields = []
@@ -187,12 +332,12 @@
fields.append((ptype, "%s_%d" % (fname, i)))
else:
fields.append((ptype, fname))
- elif ptype == "Subfind" and \
- not fh[field].size % fh["/Subfind"].attrs["Number_of_groups"]:
+ elif ptype == "Subhalo" and \
+ not fh[field].size % fh["/Subhalo"].attrs["Number_of_groups"]:
# These are actually Group fields, but they were written after
# a load balancing step moved halos around and thus they do not
# correspond to the halos stored in the Group group.
- my_div = fh[field].size / fh["/Subfind"].attrs["Number_of_groups"]
+ my_div = fh[field].size / fh["/Subhalo"].attrs["Number_of_groups"]
fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
if my_div > 1:
for i in range(int(my_div)):
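Both IO handlers share the trailing-suffix convention for vector components: a field name like "SubhaloVel_2" means column 2 of the on-disk "SubhaloVel" dataset. A small sketch of the split used in the readers above; since h5py returns an (N, 3) array for such datasets, selecting the component is a plain column index (the array below is only a stand-in for f[ptype][fname].value):

    import numpy as np

    field = "SubhaloVel_2"
    pos = field.rfind("_")
    fname = field[:pos]            # "SubhaloVel", the on-disk dataset name
    findex = int(field[pos + 1:])  # 2, the component index

    data = np.arange(12.0).reshape(4, 3)  # stand-in for the HDF5 dataset
    print(data[:, findex])  # [ 2.  5.  8.  11.]
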
diff -r 440902ffb795b23c044a09e96c3d1f2e1539273c -r 39e17fbf3a8ff16dcf79ed51b15437a5f2f23a4c yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -13,15 +13,18 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import os.path
+import numpy as np
+
+from yt.frontends.gadget_fof.api import \
+ GadgetFOFDataset
from yt.testing import \
requires_file, \
- assert_equal
+ assert_equal, \
+ assert_array_equal
from yt.utilities.answer_testing.framework import \
FieldValuesTest, \
requires_ds, \
data_dir_load
-from yt.frontends.gadget_fof.api import GadgetFOFDataset
_fields = ("particle_position_x", "particle_position_y",
"particle_position_z", "particle_velocity_x",
@@ -35,19 +38,51 @@
@requires_ds(g5)
def test_fields_g5():
- ds = data_dir_load(g5)
- yield assert_equal, str(ds), os.path.basename(g5)
for field in _fields:
yield FieldValuesTest(g5, field, particle_type=True)
@requires_ds(g42)
def test_fields_g42():
- ds = data_dir_load(g42)
- yield assert_equal, str(ds), os.path.basename(g42)
for field in _fields:
yield FieldValuesTest(g42, field, particle_type=True)
@requires_file(g42)
def test_GadgetFOFDataset():
assert isinstance(data_dir_load(g42), GadgetFOFDataset)
+
+# fof/subhalo catalog with member particles
+g298 = "gadget_halos/data/groups_298/fof_subhalo_tab_298.0.hdf5"
+
+@requires_file(g298)
+def test_subhalos():
+ ds = data_dir_load(g298)
+ total_sub = 0
+ total_int = 0
+ for hid in range(0, ds.index.particle_count["Group"]):
+ my_h = ds.halo("Group", hid)
+ h_ids = my_h["ID"]
+ for sid in range(my_h["subhalo_number"]):
+ my_s = ds.halo("Subhalo", (my_h.particle_identifier, sid))
+ total_sub += my_s["ID"].size
+ total_int += np.intersect1d(h_ids, my_s["ID"]).size
+
+ # Test that all subhalo particles are contained within
+ # their parent group.
+ yield assert_equal, total_sub, total_int
+
+@requires_file(g298)
+def test_halo_masses():
+ ds = data_dir_load(g298)
+ ad = ds.all_data()
+ for ptype in ["Group", "Subhalo"]:
+ nhalos = ds.index.particle_count[ptype]
+ mass = ds.arr(np.zeros(nhalos), "code_mass")
+ for i in range(nhalos):
+ halo = ds.halo(ptype, i)
+ mass[i] = halo.mass
+
+ # Check that masses from halo containers are the same
+ # as the array of all masses. This will test getting
+ # scalar fields for halos correctly.
+ yield assert_array_equal, ad[ptype, "particle_mass"], mass
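test_subhalos above leans on set intersection to verify containment: if every subhalo member also appears in its parent group, the intersection is exactly as large as the subhalo. The check in miniature, with invented IDs:

    import numpy as np

    h_ids = np.array([5, 9, 12, 31, 44, 60])  # parent group's member IDs
    s_ids = np.array([9, 31, 44])             # one subhalo's member IDs

    # containment holds iff no subhalo ID is lost in the intersection
    assert np.intersect1d(h_ids, s_ids).size == s_ids.size
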
https://bitbucket.org/yt_analysis/yt/commits/334b9f4c728e/
Changeset: 334b9f4c728e
Branch: yt
User: brittonsmith
Date: 2016-01-28 10:40:55+00:00
Summary: Removing unused import.
Affected #: 1 file
diff -r 39e17fbf3a8ff16dcf79ed51b15437a5f2f23a4c -r 334b9f4c728e63ab7c7bcd387177305c6acc6bf3 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -38,8 +38,6 @@
ParticleIndex
from yt.utilities.cosmology import \
Cosmology
-from yt.utilities.exceptions import \
- YTException
from yt.utilities.logger import ytLogger as \
mylog
https://bitbucket.org/yt_analysis/yt/commits/323a5fd343c2/
Changeset: 323a5fd343c2
Branch: yt
User: MatthewTurk
Date: 2016-02-03 17:15:07+00:00
Summary: Merged in brittonsmith/yt (pull request #1921)
Adding halo data containers for gadget_fof frontend.
Affected #: 6 files
diff -r 9cd43900dce52ab4ff5f59d63b28a5bd6ccdd92c -r 323a5fd343c253c2211eb385d8f5a87a241ad7d1 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1232,10 +1232,9 @@
yt has support for reading halo catalogs produced by Rockstar and the inline
FOF/SUBFIND halo finders of Gadget and OWLS. The halo catalogs are treated as
-particle datasets where each particle represents a single halo. At this time,
-yt does not have the ability to load the member particles for a given halo.
-However, once loaded, further halo analysis can be performed using
-:ref:`halo_catalog`.
+particle datasets where each particle represents a single halo. Member particles
+for individual halos can be accessed through halo data containers. Further halo
+analysis can be performed using :ref:`halo_catalog`.
In the case where halo catalogs are written to multiple files, one must only
give the path to one of them.
@@ -1269,11 +1268,39 @@
# x component of the spin
print(ad["Subhalo", "SubhaloSpin_0"])
+Halo member particles are accessed by creating halo data containers with the
+type of halo ("Group" or "Subhalo") and the halo id. Scalar values for halos
+can be accessed in the same way. Halos also have mass, position, and velocity
+attributes.
+
+.. code-block:: python
+
+ halo = ds.halo("Group", 0)
+ # member particles for this halo
+ print halo["member_ids"]
+ # halo virial radius
+ print halo["Group_R_Crit200"]
+ # halo mass
+ print halo.mass
+
+Subhalo containers can be created using either their absolute ids or a tuple
+of the parent group id and the subhalo's relative id within that group.
+
+.. code-block:: python
+
+ # first subhalo of the first halo
+ subhalo = ds.halo("Subhalo", (0, 0))
+ # this subhalo's absolute id
+ print subhalo.group_identifier
+ # member particles
+ print subhalo["member_ids"]
+
OWLS FOF/SUBFIND
^^^^^^^^^^^^^^^^
OWLS halo catalogs have a very similar structure to regular Gadget halo catalogs.
-The two field types are "FOF" and "SUBFIND".
+The two field types are "FOF" and "SUBFIND". At this time, halo member particles
+cannot be loaded.
.. code-block:: python
diff -r 9cd43900dce52ab4ff5f59d63b28a5bd6ccdd92c -r 323a5fd343c253c2211eb385d8f5a87a241ad7d1 yt/frontends/gadget_fof/api.py
--- a/yt/frontends/gadget_fof/api.py
+++ b/yt/frontends/gadget_fof/api.py
@@ -15,12 +15,19 @@
#-----------------------------------------------------------------------------
from .data_structures import \
- GadgetFOFDataset
+ GadgetFOFParticleIndex, \
+ GadgetFOFHDF5File, \
+ GadgetFOFDataset, \
+ GadgetFOFHaloParticleIndex, \
+ GadgetFOFHaloDataset, \
+ GagdetFOFHaloContainer
from .io import \
- IOHandlerGadgetFOFHDF5
+ IOHandlerGadgetFOFHDF5, \
+ IOHandlerGadgetFOFHaloHDF5
from .fields import \
- GadgetFOFFieldInfo
+ GadgetFOFFieldInfo, \
+ GadgetFOFHaloFieldInfo
from . import tests
diff -r 9cd43900dce52ab4ff5f59d63b28a5bd6ccdd92c -r 323a5fd343c253c2211eb385d8f5a87a241ad7d1 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -15,35 +15,44 @@
#-----------------------------------------------------------------------------
from collections import defaultdict
+from functools import partial
from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import stat
-import glob
import os
+import weakref
-from yt.funcs import only_on_root
-from yt.frontends.gadget_fof.fields import \
- GadgetFOFFieldInfo
-
-from yt.utilities.cosmology import \
- Cosmology
-from yt.utilities.exceptions import \
- YTException
-from yt.utilities.logger import ytLogger as \
- mylog
-from yt.geometry.particle_geometry_handler import \
- ParticleIndex
+from yt.data_objects.data_containers import \
+ YTSelectionContainer
from yt.data_objects.static_output import \
Dataset, \
ParticleFile
from yt.frontends.gadget.data_structures import \
_fix_unit_ordering
-
+from yt.frontends.gadget_fof.fields import \
+ GadgetFOFFieldInfo, \
+ GadgetFOFHaloFieldInfo
+from yt.funcs import \
+ only_on_root
+from yt.geometry.particle_geometry_handler import \
+ ParticleIndex
+from yt.utilities.cosmology import \
+ Cosmology
+from yt.utilities.logger import ytLogger as \
+ mylog
class GadgetFOFParticleIndex(ParticleIndex):
def __init__(self, ds, dataset_type):
super(GadgetFOFParticleIndex, self).__init__(ds, dataset_type)
+ def _calculate_particle_count(self):
+ """
+ Calculate the total number of each type of particle.
+ """
+ self.particle_count = \
+ dict([(ptype, sum([d.total_particles[ptype] for d in self.data_files]))
+ for ptype in self.ds.particle_types_raw])
+
def _calculate_particle_index_starts(self):
# Halo indices are not saved in the file, so we must count by hand.
# File 0 has halos 0 to N_0 - 1, file 1 has halos N_0 to N_0 + N_1 - 1, etc.
@@ -57,9 +66,14 @@
particle_count[ptype] += data_file.total_particles[ptype]
offset_count += data_file.total_offset
+ self._halo_index_start = \
+ dict([(ptype, np.array([data_file.index_start[ptype]
+ for data_file in self.data_files]))
+ for ptype in self.ds.particle_types_raw])
+
def _calculate_file_offset_map(self):
- # After the FOF is performed, a load-balancing step redistributes halos
- # and then writes more fields. Here, for each file, we create a list of
+ # After the FOF is performed, a load-balancing step redistributes halos
+ # and then writes more fields. Here, for each file, we create a list of
# files which contain the rest of the redistributed particles.
ifof = np.array([data_file.total_particles["Group"]
for data_file in self.data_files])
@@ -73,40 +87,54 @@
data_file.offset_files = self.data_files[istart[i]: iend[i] + 1]
def _detect_output_fields(self):
- # TODO: Add additional fields
- dsl = []
+ field_list = []
units = {}
- for dom in self.data_files:
- fl, _units = self.io._identify_fields(dom)
+ found_fields = \
+ dict([(ptype, False)
+ for ptype, pnum in self.particle_count.items()
+ if pnum > 0])
+
+ for data_file in self.data_files:
+ fl, _units = self.io._identify_fields(data_file)
units.update(_units)
- dom._calculate_offsets(fl)
- for f in fl:
- if f not in dsl: dsl.append(f)
- self.field_list = dsl
+ field_list.extend([f for f in fl if f not in field_list])
+ for ptype in found_fields:
+ found_fields[ptype] |= data_file.total_particles[ptype]
+ if all(found_fields.values()): break
+
+ self.field_list = field_list
ds = self.dataset
- ds.particle_types = tuple(set(pt for pt, ds in dsl))
- # This is an attribute that means these particle types *actually*
- # exist. As in, they are real, in the dataset.
+ ds.particle_types = tuple(set(pt for pt, ds in field_list))
ds.field_units.update(units)
- ds.particle_types_raw = tuple(sorted(ds.particle_types))
-
+ ds.particle_types_raw = ds.particle_types
+
def _setup_geometry(self):
super(GadgetFOFParticleIndex, self)._setup_geometry()
+ self._calculate_particle_count()
self._calculate_particle_index_starts()
self._calculate_file_offset_map()
-
+
class GadgetFOFHDF5File(ParticleFile):
def __init__(self, ds, io, filename, file_id):
+ with h5py.File(filename, "r") as f:
+ self.header = \
+ dict((str(field), val)
+ for field, val in f["Header"].attrs.items())
+ self.group_length_sum = f["Group/GroupLen"].value.sum() \
+ if "Group/GroupLen" in f else 0
+ self.group_subs_sum = f["Group/GroupNsubs"].value.sum() \
+ if "Group/GroupNsubs" in f else 0
+ self.total_ids = self.header["Nids_ThisFile"]
+ self.total_particles = \
+ {"Group": self.header["Ngroups_ThisFile"],
+ "Subhalo": self.header["Nsubgroups_ThisFile"]}
+ self.total_offset = 0 # I think this is no longer needed
super(GadgetFOFHDF5File, self).__init__(ds, io, filename, file_id)
- with h5py.File(filename, "r") as f:
- self.header = dict((field, f.attrs[field]) \
- for field in f.attrs.keys())
-
+
class GadgetFOFDataset(Dataset):
_index_class = GadgetFOFParticleIndex
_file_class = GadgetFOFHDF5File
_field_info_class = GadgetFOFFieldInfo
- _suffix = ".hdf5"
def __init__(self, filename, dataset_type="gadget_fof_hdf5",
n_ref=16, over_refine_factor=1,
@@ -122,13 +150,36 @@
raise RuntimeError("units_override is not supported for GadgetFOFDataset. "+
"Use unit_base instead.")
super(GadgetFOFDataset, self).__init__(filename, dataset_type,
- units_override=units_override)
+ units_override=units_override)
+
+ def add_field(self, *args, **kwargs):
+ super(GadgetFOFDataset, self).add_field(*args, **kwargs)
+ self._halos_ds.add_field(*args, **kwargs)
+
+ @property
+ def halos_field_list(self):
+ return self._halos_ds.field_list
+
+ @property
+ def halos_derived_field_list(self):
+ return self._halos_ds.derived_field_list
+
+ _instantiated_halo_ds = None
+ @property
+ def _halos_ds(self):
+ if self._instantiated_halo_ds is None:
+ self._instantiated_halo_ds = GadgetFOFHaloDataset(self)
+ return self._instantiated_halo_ds
+
+ def _setup_classes(self):
+ super(GadgetFOFDataset, self)._setup_classes()
+ self.halo = partial(GagdetFOFHaloContainer, ds=self._halos_ds)
def _parse_parameter_file(self):
- handle = h5py.File(self.parameter_filename, mode="r")
- hvals = {}
- hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
- hvals["NumFiles"] = hvals["NumFiles"]
+ with h5py.File(self.parameter_filename,"r") as f:
+ self.parameters = \
+ dict((str(field), val)
+ for field, val in f["Header"].attrs.items())
self.dimensionality = 3
self.refine_by = 2
@@ -137,35 +188,29 @@
# Set standard values
self.domain_left_edge = np.zeros(3, "float64")
- self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
+ self.domain_right_edge = np.ones(3, "float64") * \
+ self.parameters["BoxSize"]
nz = 1 << self.over_refine_factor
self.domain_dimensions = np.ones(3, "int32") * nz
self.cosmological_simulation = 1
self.periodicity = (True, True, True)
- self.current_redshift = hvals["Redshift"]
- self.omega_lambda = hvals["OmegaLambda"]
- self.omega_matter = hvals["Omega0"]
- self.hubble_constant = hvals["HubbleParam"]
-
+ self.current_redshift = self.parameters["Redshift"]
+ self.omega_lambda = self.parameters["OmegaLambda"]
+ self.omega_matter = self.parameters["Omega0"]
+ self.hubble_constant = self.parameters["HubbleParam"]
cosmology = Cosmology(hubble_constant=self.hubble_constant,
omega_matter=self.omega_matter,
omega_lambda=self.omega_lambda)
self.current_time = cosmology.t_from_z(self.current_redshift)
- self.parameters = hvals
prefix = os.path.abspath(
os.path.join(os.path.dirname(self.parameter_filename),
os.path.basename(self.parameter_filename).split(".", 1)[0]))
-
suffix = self.parameter_filename.rsplit(".", 1)[-1]
self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix)
- self.file_count = len(glob.glob(prefix + "*" + self._suffix))
- if self.file_count == 0:
- raise YTException(message="No data files found.", ds=self)
+ self.file_count = self.parameters["NumFiles"]
self.particle_types = ("Group", "Subhalo")
self.particle_types_raw = ("Group", "Subhalo")
-
- handle.close()
def _set_code_unit_attributes(self):
# Set a sane default for cosmological simulations.
@@ -223,6 +268,9 @@
time_unit = (1., "s")
self.time_unit = self.quan(time_unit[0], time_unit[1])
+ def __repr__(self):
+ return self.basename.split(".", 1)[0]
+
@classmethod
def _is_valid(self, *args, **kwargs):
need_groups = ['Group', 'Header', 'Subhalo']
@@ -237,3 +285,366 @@
valid = False
pass
return valid
+
+class GadgetFOFHaloParticleIndex(GadgetFOFParticleIndex):
+ def __init__(self, ds, dataset_type):
+ self.real_ds = weakref.proxy(ds.real_ds)
+ super(GadgetFOFHaloParticleIndex, self).__init__(ds, dataset_type)
+
+ def _setup_geometry(self):
+ self._setup_data_io()
+
+ if self.real_ds._instantiated_index is None:
+ template = self.real_ds.filename_template
+ ndoms = self.real_ds.file_count
+ cls = self.real_ds._file_class
+ self.data_files = \
+ [cls(self.dataset, self.io, template % {'num':i}, i)
+ for i in range(ndoms)]
+ else:
+ self.data_files = self.real_ds.index.data_files
+
+ self._calculate_particle_index_starts()
+ self._calculate_particle_count()
+ self._create_halo_id_table()
+
+ def _create_halo_id_table(self):
+ """
+ Create a list of halo start ids so we know which file
+ contains particles for a given halo. Note, the halo ids
+ are distributed over all files and so the ids for a given
+ halo are likely stored in a different file than the halo
+ itself.
+ """
+
+ self._halo_id_number = np.array([data_file.total_ids
+ for data_file in self.data_files])
+ self._halo_id_end = self._halo_id_number.cumsum()
+ self._halo_id_start = self._halo_id_end - self._halo_id_number
+
+ self._group_length_sum = \
+ np.array([data_file.group_length_sum
+ for data_file in self.data_files])
+
+ def _detect_output_fields(self):
+ field_list = []
+ scalar_field_list = []
+ units = {}
+ found_fields = \
+ dict([(ptype, False)
+ for ptype, pnum in self.particle_count.items()
+ if pnum > 0])
+
+ for data_file in self.data_files:
+ fl, sl, _units = self.io._identify_fields(data_file)
+ units.update(_units)
+ field_list.extend([f for f in fl
+ if f not in field_list])
+ scalar_field_list.extend([f for f in sl
+ if f not in scalar_field_list])
+ for ptype in found_fields:
+ found_fields[ptype] |= data_file.total_particles[ptype]
+ if all(found_fields.values()): break
+
+ self.field_list = field_list
+ self.scalar_field_list = scalar_field_list
+ ds = self.dataset
+ ds.scalar_field_list = scalar_field_list
+ ds.particle_types = tuple(set(pt for pt, ds in field_list))
+ ds.field_units.update(units)
+ ds.particle_types_raw = ds.particle_types
+
+ def _identify_base_chunk(self, dobj):
+ pass
+
+ def _read_particle_fields(self, fields, dobj, chunk = None):
+ if len(fields) == 0: return {}, []
+ fields_to_read, fields_to_generate = self._split_fields(fields)
+ if len(fields_to_read) == 0:
+ return {}, fields_to_generate
+ fields_to_return = self.io._read_particle_selection(
+ dobj, fields_to_read)
+ return fields_to_return, fields_to_generate
+
+ def _get_halo_file_indices(self, ptype, identifiers):
+ return np.digitize(identifiers,
+ self._halo_index_start[ptype], right=False) - 1
+
+ def _get_halo_scalar_index(self, ptype, identifier):
+ i_scalar = self._get_halo_file_indices(ptype, [identifier])[0]
+ scalar_index = identifier - self._halo_index_start[ptype][i_scalar]
+ return scalar_index
+
+ def _get_halo_values(self, ptype, identifiers, fields,
+ f=None):
+ """
+ Get field values for halos. IDs are likely to be
+ sequential (or at least monotonic), but not necessarily
+ all within the same file.
+
+ This does not do much to minimize file i/o, but with
+ halos randomly distributed across files, there's not
+ much more we can do.
+ """
+
+ # if a file is already open, don't open it again
+ filename = None if f is None \
+ else f.filename
+
+ data = defaultdict(lambda: np.empty(identifiers.size))
+ i_scalars = self._get_halo_file_indices(ptype, identifiers)
+ for i_scalar in np.unique(i_scalars):
+ target = i_scalars == i_scalar
+ scalar_indices = identifiers - \
+ self._halo_index_start[ptype][i_scalar]
+
+ # only open file if it's not already open
+ my_f = f if self.data_files[i_scalar].filename == filename \
+ else h5py.File(self.data_files[i_scalar].filename, "r")
+
+ for field in fields:
+ data[field][target] = \
+ my_f[os.path.join(ptype, field)].value[scalar_indices[target]]
+
+ if self.data_files[i_scalar].filename != filename: my_f.close()
+
+ return data
+
+class GadgetFOFHaloDataset(Dataset):
+ _index_class = GadgetFOFHaloParticleIndex
+ _file_class = GadgetFOFHDF5File
+ _field_info_class = GadgetFOFHaloFieldInfo
+
+ def __init__(self, ds, dataset_type="gadget_fof_halo_hdf5"):
+ self.real_ds = ds
+ self.particle_types_raw = self.real_ds.particle_types_raw
+ self.particle_types = self.particle_types_raw
+
+ super(GadgetFOFHaloDataset, self).__init__(
+ self.real_ds.parameter_filename, dataset_type)
+
+ def print_key_parameters(self):
+ pass
+
+ def _set_derived_attrs(self):
+ pass
+
+ def _parse_parameter_file(self):
+ for attr in ["cosmological_simulation", "cosmology",
+ "current_redshift", "current_time",
+ "dimensionality", "domain_dimensions",
+ "domain_left_edge", "domain_right_edge",
+ "domain_width", "hubble_constant",
+ "omega_lambda", "omega_matter",
+ "unique_identifier"]:
+ setattr(self, attr, getattr(self.real_ds, attr))
+
+ def set_code_units(self):
+ for unit in ["length", "time", "mass",
+ "velocity", "magnetic", "temperature"]:
+ my_unit = "%s_unit" % unit
+ setattr(self, my_unit, getattr(self.real_ds, my_unit, None))
+ self.unit_registry = self.real_ds.unit_registry
+
+ def __repr__(self):
+ return "%s" % self.real_ds
+
+ def _setup_classes(self):
+ self.objects = []
+
+class GagdetFOFHaloContainer(YTSelectionContainer):
+ """
+ Create a data container to get member particles and individual
+ values from halos and subhalos. Halo mass, position, and
+    velocity are set as attributes. Member particle IDs are accessible
+    through the field "member_ids". Other fields that are one
+ value per halo are accessible as normal. The field list for
+ halo objects can be seen in `ds.halos_field_list`.
+
+ Parameters
+ ----------
+ ptype : string
+ The type of halo, either "Group" for the main halo or
+ "Subhalo" for subhalos.
+ particle_identifier : int or tuple of (int, int)
+ The halo or subhalo id. If requesting a subhalo, the id
+ can also be given as a tuple of the main halo id and
+ subgroup id, such as (1, 4) for subgroup 4 of halo 1.
+
+ Halo Container Attributes
+ -------------------------
+ particle_identifier : int
+ The id of the halo or subhalo.
+ group_identifier : int
+ For subhalos, the id of the enclosing halo.
+ subgroup_identifier : int
+ For subhalos, the relative id of the subhalo within
+ the enclosing halo.
+ particle_number : int
+ Number of particles in the halo.
+ mass : float
+ Halo mass.
+ position : array of floats
+ Halo position.
+ velocity : array of floats
+ Halo velocity.
+
+ Relevant Fields
+ ---------------
+ particle_number :
+ number of particles
+ subhalo_number :
+ number of subhalos
+ group_identifier :
+ id of parent group for subhalos
+
+ Examples
+ --------
+
+ >>> import yt
+ >>> ds = yt.load("gadget_halos/data/groups_298/fof_subhalo_tab_298.0.hdf5")
+ >>>
+ >>> halo = ds.halo("Group", 0)
+ >>> print halo.mass
+ 13256.5517578 code_mass
+ >>> print halo.position
+ [ 16.18603706 6.95965052 12.52694607] code_length
+ >>> print halo.velocity
+ [ 6943694.22793569 -762788.90647454 -794749.63819757] cm/s
+ >>> print halo["Group_R_Crit200"]
+ [ 0.79668683] code_length
+ >>>
+ >>> # particle ids for this halo
+ >>> print halo["member_ids"]
+ [ 723631. 690744. 854212. ..., 608589. 905551. 1147449.] dimensionless
+ >>>
+ >>> # get the first subhalo of this halo
+ >>> subhalo = ds.halo("Subhalo", (0, 0))
+ >>> print subhalo["member_ids"]
+ [ 723631. 690744. 854212. ..., 808362. 956359. 1248821.] dimensionless
+
+ """
+
+ _type_name = "halo"
+ _con_args = ("ptype", "particle_identifier")
+ _spatial = False
+
+ def __init__(self, ptype, particle_identifier, ds=None):
+ if ptype not in ds.particle_types_raw:
+ raise RuntimeError("Possible halo types are %s, supplied \"%s\"." %
+ (ds.particle_types_raw, ptype))
+
+ self.ptype = ptype
+ self._current_particle_type = ptype
+ super(GagdetFOFHaloContainer, self).__init__(ds, {})
+
+ if ptype == "Subhalo" and isinstance(particle_identifier, tuple):
+ self.group_identifier, self.subgroup_identifier = \
+ particle_identifier
+ my_data = self.index._get_halo_values(
+ "Group", np.array([self.group_identifier]),
+ ["GroupFirstSub"])
+ self.particle_identifier = \
+ np.int64(my_data["GroupFirstSub"][0] + self.subgroup_identifier)
+ else:
+ self.particle_identifier = particle_identifier
+
+ if self.particle_identifier >= self.index.particle_count[ptype]:
+ raise RuntimeError("%s %d requested, but only %d %s objects exist." %
+ (ptype, particle_identifier,
+ self.index.particle_count[ptype], ptype))
+
+ # Find the file that has the scalar values for this halo.
+ i_scalar = self.index._get_halo_file_indices(
+ ptype, [self.particle_identifier])[0]
+ self.scalar_data_file = self.index.data_files[i_scalar]
+
+ # index within halo arrays that corresponds to this halo
+ self.scalar_index = self.index._get_halo_scalar_index(
+ ptype, self.particle_identifier)
+
+ halo_fields = ["%sLen" % ptype]
+ if ptype == "Subhalo": halo_fields.append("SubhaloGrNr")
+ my_data = self.index._get_halo_values(
+ ptype, np.array([self.particle_identifier]),
+ halo_fields)
+ self.particle_number = np.int64(my_data["%sLen" % ptype][0])
+
+ if ptype == "Group":
+ self.group_identifier = self.particle_identifier
+ id_offset = 0
+ # index of file that has scalar values for the group
+ g_scalar = i_scalar
+ group_index = self.scalar_index
+
+ # If a subhalo, find the index of the parent.
+ elif ptype == "Subhalo":
+ self.group_identifier = np.int64(my_data["SubhaloGrNr"][0])
+
+ # Find the file that has the scalar values for the parent group.
+ g_scalar = self.index._get_halo_file_indices(
+ "Group", [self.group_identifier])[0]
+
+            # index within halo arrays that corresponds to the parent group
+ group_index = self.index._get_halo_scalar_index(
+ "Group", self.group_identifier)
+
+ my_data = self.index._get_halo_values(
+ "Group", np.array([self.group_identifier]),
+ ["GroupNsubs", "GroupFirstSub"])
+ self.subgroup_identifier = self.particle_identifier - \
+ np.int64(my_data["GroupFirstSub"][0])
+ parent_subhalos = my_data["GroupNsubs"][0]
+
+ mylog.debug("Subhalo %d is subgroup %s of %d in group %d." % \
+ (self.particle_identifier, self.subgroup_identifier,
+ parent_subhalos, self.group_identifier))
+
+ # ids of the sibling subhalos that come before this one
+ if self.subgroup_identifier > 0:
+ sub_ids = np.arange(
+ self.particle_identifier - self.subgroup_identifier,
+ self.particle_identifier)
+ my_data = self.index._get_halo_values(
+ "Subhalo", sub_ids, ["SubhaloLen"])
+ id_offset = my_data["SubhaloLen"].sum(dtype=np.int64)
+ else:
+ id_offset = 0
+
+ # Calculate the starting index for the member particles.
+ # First, add up all the particles in the earlier files.
+ all_id_start = self.index._group_length_sum[:g_scalar].sum(dtype=np.int64)
+
+ # Now add the halos in this file that come before.
+ with h5py.File(self.index.data_files[g_scalar].filename, "r") as f:
+ all_id_start += f["Group"]["GroupLen"][:group_index].sum(dtype=np.int64)
+
+ # Add the subhalo offset.
+ all_id_start += id_offset
+
+ # indices of first and last files containing member particles
+ i_start = np.digitize([all_id_start],
+ self.index._halo_id_start,
+ right=False)[0] - 1
+ i_end = np.digitize([all_id_start+self.particle_number],
+ self.index._halo_id_end,
+ right=True)[0]
+ self.field_data_files = self.index.data_files[i_start:i_end+1]
+
+ # starting and ending indices for each file containing particles
+ self.field_data_start = \
+ (all_id_start -
+ self.index._halo_id_start[i_start:i_end+1]).clip(min=0)
+ self.field_data_start = self.field_data_start.astype(np.int64)
+ self.field_data_end = \
+ (all_id_start + self.particle_number -
+ self.index._halo_id_start[i_start:i_end+1]).clip(
+ max=self.index._halo_id_number[i_start:i_end+1])
+ self.field_data_end = self.field_data_end.astype(np.int64)
+
+ for attr in ["mass", "position", "velocity"]:
+ setattr(self, attr, self[self.ptype, "particle_%s" % attr][0])
+
+ def __repr__(self):
+ return "%s_%s_%09d" % \
+ (self.ds, self.ptype, self.particle_identifier)
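The scalar side uses the same digitize trick in one dimension: _halo_index_start records the first halo id held by each file, so finding a halo's file and its local row takes two array operations. A toy version of _get_halo_file_indices and _get_halo_scalar_index, with invented per-file halo counts:

    import numpy as np

    counts = np.array([40, 25, 60])  # invented halos per catalog file
    index_start = np.concatenate(([0], counts.cumsum()[:-1]))  # [0, 40, 65]

    def locate(identifier):
        # same np.digitize call as in the index methods above
        i = np.digitize([identifier], index_start, right=False)[0] - 1
        return i, identifier - index_start[i]

    print(locate(39))  # (0, 39): last halo of file 0
    print(locate(40))  # (1, 0): first halo of file 1
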
diff -r 9cd43900dce52ab4ff5f59d63b28a5bd6ccdd92c -r 323a5fd343c253c2211eb385d8f5a87a241ad7d1 yt/frontends/gadget_fof/fields.py
--- a/yt/frontends/gadget_fof/fields.py
+++ b/yt/frontends/gadget_fof/fields.py
@@ -21,28 +21,70 @@
p_units = "code_length"
v_units = "code_velocity"
+_pnums = 6
+_type_fields = \
+ tuple(("%s%sType_%d" % (ptype, field, pnum), (units, [], None))
+ for pnum in range(_pnums)
+ for field, units in (("Mass", m_units), ("Len", p_units))
+ for ptype in ("Group", "Subhalo"))
+_sub_type_fields = \
+ tuple(("Subhalo%sType_%d" % (field, pnum), (units, [], None))
+ for pnum in range(_pnums)
+ for field, units in (("HalfmassRad", p_units),
+ ("MassInHalfRad", m_units),
+ ("MassInMaxRad", m_units),
+ ("MassInRad", m_units)))
+
+_particle_fields = (
+ ("GroupPos_0", (p_units, ["Group", "particle_position_x"], None)),
+ ("GroupPos_1", (p_units, ["Group", "particle_position_y"], None)),
+ ("GroupPos_2", (p_units, ["Group", "particle_position_z"], None)),
+ ("GroupVel_0", (v_units, ["Group", "particle_velocity_x"], None)),
+ ("GroupVel_1", (v_units, ["Group", "particle_velocity_y"], None)),
+ ("GroupVel_2", (v_units, ["Group", "particle_velocity_z"], None)),
+ ("GroupMass", (m_units, ["Group", "particle_mass"], None)),
+ ("GroupLen", ("", ["Group", "particle_number"], None)),
+ ("GroupNsubs", ("", ["Group", "subhalo_number"], None)),
+ ("GroupFirstSub", ("", [], None)),
+ ("Group_M_Crit200", (m_units, [], None)),
+ ("Group_M_Crit500", (m_units, [], None)),
+ ("Group_M_Mean200", (m_units, [], None)),
+ ("Group_M_TopHat200", (m_units, [], None)),
+ ("Group_R_Crit200", (p_units, [], None)),
+ ("Group_R_Crit500", (p_units, [], None)),
+ ("Group_R_Mean200", (p_units, [], None)),
+ ("Group_R_TopHat200", (p_units, [], None)),
+ ("SubhaloPos_0", (p_units, ["Subhalo", "particle_position_x"], None)),
+ ("SubhaloPos_1", (p_units, ["Subhalo", "particle_position_y"], None)),
+ ("SubhaloPos_2", (p_units, ["Subhalo", "particle_position_z"], None)),
+ ("SubhaloVel_0", (v_units, ["Subhalo", "particle_velocity_x"], None)),
+ ("SubhaloVel_1", (v_units, ["Subhalo", "particle_velocity_y"], None)),
+ ("SubhaloVel_2", (v_units, ["Subhalo", "particle_velocity_z"], None)),
+ ("SubhaloMass", (m_units, ["Subhalo", "particle_mass"], None)),
+ ("SubhaloLen", ("", ["Subhalo", "particle_number"], None)),
+ ("SubhaloCM_0", (p_units, ["Subhalo", "center_of_mass_x"], None)),
+ ("SubhaloCM_1", (p_units, ["Subhalo", "center_of_mass_y"], None)),
+ ("SubhaloCM_2", (p_units, ["Subhalo", "center_of_mass_z"], None)),
+ ("SubhaloSpin_0", ("", ["Subhalo", "spin_x"], None)),
+ ("SubhaloSpin_1", ("", ["Subhalo", "spin_y"], None)),
+ ("SubhaloSpin_2", ("", ["Subhalo", "spin_z"], None)),
+ ("SubhaloGrNr", ("", ["Subhalo", "group_identifier"], None)),
+ ("SubhaloHalfmassRad", (p_units, [], None)),
+ ("SubhaloIDMostbound", ("", [], None)),
+ ("SubhaloMassInHalfRad", (m_units, [], None)),
+ ("SubhaloMassInMaxRad", (m_units, [], None)),
+ ("SubhaloMassInRad", (m_units, [], None)),
+ ("SubhaloParent", ("", [], None)),
+ ("SubhaloVelDisp", (v_units, ["Subhalo", "velocity_dispersion"], None)),
+ ("SubhaloVmax", (v_units, [], None)),
+ ("SubhaloVmaxRad", (p_units, [], None)),
+) + _type_fields + _sub_type_fields
+
class GadgetFOFFieldInfo(FieldInfoContainer):
known_other_fields = (
)
- known_particle_fields = (
- ("GroupPos_0", (p_units, ["Group", "particle_position_x"], None)),
- ("GroupPos_1", (p_units, ["Group", "particle_position_y"], None)),
- ("GroupPos_2", (p_units, ["Group", "particle_position_z"], None)),
- ("GroupVel_0", (v_units, ["Group", "particle_velocity_x"], None)),
- ("GroupVel_1", (v_units, ["Group", "particle_velocity_y"], None)),
- ("GroupVel_2", (v_units, ["Group", "particle_velocity_z"], None)),
- ("GroupMass", (m_units, ["Group", "particle_mass"], None)),
- ("GroupLen", ("", ["Group", "particle_number"], None)),
- ("SubhaloPos_0", (p_units, ["Subhalo", "particle_position_x"], None)),
- ("SubhaloPos_1", (p_units, ["Subhalo", "particle_position_y"], None)),
- ("SubhaloPos_2", (p_units, ["Subhalo", "particle_position_z"], None)),
- ("SubhaloVel_0", (v_units, ["Subhalo", "particle_velocity_x"], None)),
- ("SubhaloVel_1", (v_units, ["Subhalo", "particle_velocity_y"], None)),
- ("SubhaloVel_2", (v_units, ["Subhalo", "particle_velocity_z"], None)),
- ("SubhaloMass", (m_units, ["Subhalo", "particle_mass"], None)),
- ("SubhaloLen", ("", ["Subhalo", "particle_number"], None)),
- )
+ known_particle_fields = _particle_fields
# these are extra fields to be created for the "all" particle type
extra_union_fields = (
@@ -56,3 +98,10 @@
("", "particle_number"),
("", "particle_ones"),
)
+
+class GadgetFOFHaloFieldInfo(FieldInfoContainer):
+ known_other_fields = (
+ )
+
+ known_particle_fields = _particle_fields + \
+ (("ID", ("", ["member_ids"], None)),)
diff -r 9cd43900dce52ab4ff5f59d63b28a5bd6ccdd92c -r 323a5fd343c253c2211eb385d8f5a87a241ad7d1 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -14,16 +14,17 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
+from collections import defaultdict
from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
-from yt.utilities.exceptions import YTDomainOverflow
from yt.funcs import mylog
-
+from yt.utilities.exceptions import \
+ YTDomainOverflow
from yt.utilities.io_handler import \
BaseIOHandler
-
-from yt.utilities.lib.geometry_utils import compute_morton
+from yt.utilities.lib.geometry_utils import \
+ compute_morton
class IOHandlerGadgetFOFHDF5(BaseIOHandler):
_dataset_type = "gadget_fof_hdf5"
@@ -55,7 +56,8 @@
def _read_offset_particle_field(self, field, data_file, fh):
field_data = np.empty(data_file.total_particles["Group"], dtype="float64")
- fofindex = np.arange(data_file.total_particles["Group"]) + data_file.index_start["Group"]
+ fofindex = np.arange(data_file.total_particles["Group"]) + \
+ data_file.index_start["Group"]
for offset_file in data_file.offset_files:
if fh.filename == offset_file.filename:
ofh = fh
@@ -105,7 +107,6 @@
field_data = f[ptype][fname].value.astype("float64")
my_div = field_data.size / pcount
if my_div > 1:
- field_data = np.resize(field_data, (pcount, my_div))
findex = int(field[field.rfind("_") + 1:])
field_data = field_data[:, findex]
data = field_data[mask]
@@ -149,11 +150,7 @@
return morton
def _count_particles(self, data_file):
- with h5py.File(data_file.filename, "r") as f:
- pcount = {"Group": f["Header"].attrs["Ngroups_ThisFile"],
- "Subhalo": f["Header"].attrs["Nsubgroups_ThisFile"]}
- data_file.total_offset = 0 # need to figure out how subfind works here
- return pcount
+ return data_file.total_particles
def _identify_fields(self, data_file):
fields = []
@@ -169,6 +166,154 @@
self.offset_fields = self.offset_fields.union(set(my_offset_fields))
return fields, {}
+class IOHandlerGadgetFOFHaloHDF5(IOHandlerGadgetFOFHDF5):
+ _dataset_type = "gadget_fof_halo_hdf5"
+
+ def _read_particle_coords(self, chunks, ptf):
+ pass
+
+ def _read_particle_selection(self, dobj, fields):
+ rv = {}
+ ind = {}
+ # We first need a set of masks for each particle type
+ ptf = defaultdict(list) # ON-DISK TO READ
+ fsize = defaultdict(lambda: 0) # COUNT RV
+ field_maps = defaultdict(list) # ptypes -> fields
+ unions = self.ds.particle_unions
+ # What we need is a mapping from particle types to return types
+ for field in fields:
+ ftype, fname = field
+ fsize[field] = 0
+ # We should add a check for p.fparticle_unions or something here
+ if ftype in unions:
+ for pt in unions[ftype]:
+ ptf[pt].append(fname)
+ field_maps[pt, fname].append(field)
+ else:
+ ptf[ftype].append(fname)
+ field_maps[field].append(field)
+
+ # Now we allocate
+ psize = {dobj.ptype: dobj.particle_number}
+ for field in fields:
+ if field[0] in unions:
+ for pt in unions[field[0]]:
+ fsize[field] += psize.get(pt, 0)
+ else:
+ fsize[field] += psize.get(field[0], 0)
+ for field in fields:
+ if field[1] in self._vector_fields:
+ shape = (fsize[field], self._vector_fields[field[1]])
+ elif field[1] in self._array_fields:
+ shape = (fsize[field],)+self._array_fields[field[1]]
+ elif field in self.ds.scalar_field_list:
+ shape = (1,)
+ else:
+ shape = (fsize[field], )
+ rv[field] = np.empty(shape, dtype="float64")
+ ind[field] = 0
+ # Now we read.
+ for field_r, vals in self._read_particle_fields(dobj, ptf):
+ # Note that we now need to check the mappings
+ for field_f in field_maps[field_r]:
+ my_ind = ind[field_f]
+ rv[field_f][my_ind:my_ind + vals.shape[0],...] = vals
+ ind[field_f] += vals.shape[0]
+ # Now we need to truncate all our fields, since we allow for
+ # over-estimating.
+ for field_f in ind:
+ rv[field_f] = rv[field_f][:ind[field_f]]
+ return rv
+
+ def _read_scalar_fields(self, dobj, scalar_fields):
+ all_data = {}
+ if not scalar_fields: return all_data
+ pcount = 1
+ with h5py.File(dobj.scalar_data_file.filename, "r") as f:
+ for ptype, field_list in sorted(scalar_fields.items()):
+ for field in field_list:
+ if field == "particle_identifier":
+ field_data = \
+ np.arange(dobj.scalar_data_file.total_particles[ptype]) + \
+ dobj.scalar_data_file.index_start[ptype]
+ elif field in f[ptype]:
+ field_data = f[ptype][field].value.astype("float64")
+ else:
+ fname = field[:field.rfind("_")]
+ field_data = f[ptype][fname].value.astype("float64")
+ my_div = field_data.size / pcount
+ if my_div > 1:
+ findex = int(field[field.rfind("_") + 1:])
+ field_data = field_data[:, findex]
+ data = np.array([field_data[dobj.scalar_index]])
+ all_data[(ptype, field)] = data
+ return all_data
+
+ def _read_member_fields(self, dobj, member_fields):
+ all_data = defaultdict(lambda: np.empty(dobj.particle_number,
+ dtype=np.float64))
+ if not member_fields: return all_data
+ field_start = 0
+ for i, data_file in enumerate(dobj.field_data_files):
+ start_index = dobj.field_data_start[i]
+ end_index = dobj.field_data_end[i]
+ pcount = end_index - start_index
+ field_end = field_start + end_index - start_index
+ with h5py.File(data_file.filename, "r") as f:
+ for ptype, field_list in sorted(member_fields.items()):
+ for field in field_list:
+ field_data = all_data[(ptype, field)]
+ if field in f["IDs"]:
+ my_data = \
+ f["IDs"][field][start_index:end_index].astype("float64")
+ else:
+ fname = field[:field.rfind("_")]
+ my_data = \
+ f["IDs"][fname][start_index:end_index].astype("float64")
+ my_div = my_data.size / pcount
+ if my_div > 1:
+ findex = int(field[field.rfind("_") + 1:])
+ my_data = my_data[:, findex]
+ field_data[field_start:field_end] = my_data
+ field_start = field_end
+ return all_data
+
+ def _read_particle_fields(self, dobj, ptf):
+ # separate member particle fields from scalar fields
+ scalar_fields = defaultdict(list)
+ member_fields = defaultdict(list)
+ for ptype, field_list in sorted(ptf.items()):
+ for field in field_list:
+ if (ptype, field) in self.ds.scalar_field_list:
+ scalar_fields[ptype].append(field)
+ else:
+ member_fields[ptype].append(field)
+
+ all_data = self._read_scalar_fields(dobj, scalar_fields)
+ all_data.update(self._read_member_fields(dobj, member_fields))
+
+ for field, field_data in all_data.items():
+ yield field, field_data
+
+ def _identify_fields(self, data_file):
+ fields = []
+ scalar_fields = []
+ pcount = data_file.total_particles
+ if sum(pcount.values()) == 0: return fields, scalar_fields, {}
+ with h5py.File(data_file.filename, "r") as f:
+ for ptype in self.ds.particle_types_raw:
+ if data_file.total_particles[ptype] == 0: continue
+ fields.append((ptype, "particle_identifier"))
+ scalar_fields.append((ptype, "particle_identifier"))
+ my_fields, my_offset_fields = \
+ subfind_field_list(f[ptype], ptype, data_file.total_particles)
+ fields.extend(my_fields)
+ scalar_fields.extend(my_fields)
+
+ if "IDs" not in f: continue
+ fields.extend([(ptype, field) for field in f["IDs"]])
+ return fields, scalar_fields, {}
+
def subfind_field_list(fh, ptype, pcount):
fields = []
offset_fields = []
@@ -187,12 +332,12 @@
fields.append((ptype, "%s_%d" % (fname, i)))
else:
fields.append((ptype, fname))
- elif ptype == "Subfind" and \
- not fh[field].size % fh["/Subfind"].attrs["Number_of_groups"]:
+ elif ptype == "Subhalo" and \
+ not fh[field].size % fh["/Subhalo"].attrs["Number_of_groups"]:
# These are actually Group fields, but they were written after
# a load balancing step moved halos around and thus they do not
# correspond to the halos stored in the Group group.
- my_div = fh[field].size / fh["/Subfind"].attrs["Number_of_groups"]
+ my_div = fh[field].size / fh["/Subhalo"].attrs["Number_of_groups"]
fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
if my_div > 1:
for i in range(int(my_div)):
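_read_member_fields walks the per-file windows computed by the halo container and concatenates them into a single output array. The stitching loop in miniature, reusing the invented windows from the digitize sketch earlier (two files contribute rows 130:250 and 0:40):

    import numpy as np

    file_data = [np.arange(300.0), np.arange(300.0, 500.0)]  # fake "IDs" data
    starts, ends = [130, 0], [250, 40]

    out = np.empty(160)
    pos = 0
    for data, s, e in zip(file_data, starts, ends):
        out[pos:pos + (e - s)] = data[s:e]
        pos += e - s

    print(out[0])   # 130.0, first member read from file 0
    print(out[-1])  # 339.0, last member read from file 1
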
diff -r 9cd43900dce52ab4ff5f59d63b28a5bd6ccdd92c -r 323a5fd343c253c2211eb385d8f5a87a241ad7d1 yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -13,15 +13,18 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import os.path
+import numpy as np
+
+from yt.frontends.gadget_fof.api import \
+ GadgetFOFDataset
from yt.testing import \
requires_file, \
- assert_equal
+ assert_equal, \
+ assert_array_equal
from yt.utilities.answer_testing.framework import \
FieldValuesTest, \
requires_ds, \
data_dir_load
-from yt.frontends.gadget_fof.api import GadgetFOFDataset
_fields = ("particle_position_x", "particle_position_y",
"particle_position_z", "particle_velocity_x",
@@ -35,19 +38,51 @@
@requires_ds(g5)
def test_fields_g5():
- ds = data_dir_load(g5)
- yield assert_equal, str(ds), os.path.basename(g5)
for field in _fields:
yield FieldValuesTest(g5, field, particle_type=True)
@requires_ds(g42)
def test_fields_g42():
- ds = data_dir_load(g42)
- yield assert_equal, str(ds), os.path.basename(g42)
for field in _fields:
yield FieldValuesTest(g42, field, particle_type=True)
@requires_file(g42)
def test_GadgetFOFDataset():
assert isinstance(data_dir_load(g42), GadgetFOFDataset)
+
+# fof/subhalo catalog with member particles
+g298 = "gadget_halos/data/groups_298/fof_subhalo_tab_298.0.hdf5"
+
+@requires_file(g298)
+def test_subhalos():
+ ds = data_dir_load(g298)
+ total_sub = 0
+ total_int = 0
+ for hid in range(0, ds.index.particle_count["Group"]):
+ my_h = ds.halo("Group", hid)
+ h_ids = my_h["ID"]
+ for sid in range(my_h["subhalo_number"]):
+ my_s = ds.halo("Subhalo", (my_h.particle_identifier, sid))
+ total_sub += my_s["ID"].size
+ total_int += np.intersect1d(h_ids, my_s["ID"]).size
+
+ # Test that all subhalo particles are contained within
+ # their parent group.
+ yield assert_equal, total_sub, total_int
+
+@requires_file(g298)
+def test_halo_masses():
+ ds = data_dir_load(g298)
+ ad = ds.all_data()
+ for ptype in ["Group", "Subhalo"]:
+ nhalos = ds.index.particle_count[ptype]
+ mass = ds.arr(np.zeros(nhalos), "code_mass")
+ for i in range(nhalos):
+ halo = ds.halo(ptype, i)
+ mass[i] = halo.mass
+
+ # Check that masses from halo containers are the same
+ # as the array of all masses. This will test getting
+ # scalar fields for halos correctly.
+ yield assert_array_equal, ad[ptype, "particle_mass"], mass
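Taken together, the merged changes make it possible to cross-check halo containers against whole-catalog arrays, much as the new tests do. A usage sketch, assuming the same g298 test catalog is available on the test-data path:

    import yt

    ds = yt.load("gadget_halos/data/groups_298/fof_subhalo_tab_298.0.hdf5")
    ad = ds.all_data()

    # summed member counts from the containers should match the GroupLen
    # column read directly from the catalog
    total = sum(ds.halo("Group", i)["member_ids"].size
                for i in range(ds.index.particle_count["Group"]))
    print(total == int(ad["Group", "particle_number"].sum()))  # True
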
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.