[yt-svn] commit/yt: 18 new changesets
commits-noreply at bitbucket.org
Mon May 5 08:48:46 PDT 2014
18 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/c697235f08ca/
Changeset: c697235f08ca
Branch: yt-3.0
User: brittonsmith
Date: 2014-04-13 16:55:30
Summary: Adding almost functional subfind frontend.
Affected #: 7 files
diff -r 794b2beb2c48272fed16de10daa07975309b950f -r c697235f08ca2def04797514547eec5c0365bfeb yt/frontends/halo_catalogs/api.py
--- a/yt/frontends/halo_catalogs/api.py
+++ b/yt/frontends/halo_catalogs/api.py
@@ -23,3 +23,8 @@
RockstarDataset, \
IOHandlerRockstarBinary, \
RockstarFieldInfo
+
+from .subfind.api import \
+ SubfindDataset, \
+ IOHandlerSubfindHDF5, \
+ SubfindFieldInfo
diff -r 794b2beb2c48272fed16de10daa07975309b950f -r c697235f08ca2def04797514547eec5c0365bfeb yt/frontends/halo_catalogs/subfind/__init__.py
--- /dev/null
+++ b/yt/frontends/halo_catalogs/subfind/__init__.py
@@ -0,0 +1,15 @@
+"""
+API for HaloCatalog frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
diff -r 794b2beb2c48272fed16de10daa07975309b950f -r c697235f08ca2def04797514547eec5c0365bfeb yt/frontends/halo_catalogs/subfind/api.py
--- /dev/null
+++ b/yt/frontends/halo_catalogs/subfind/api.py
@@ -0,0 +1,24 @@
+"""
+API for Subfind frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+ SubfindDataset
+
+from .io import \
+ IOHandlerSubfindHDF5
+
+from .fields import \
+ SubfindFieldInfo
diff -r 794b2beb2c48272fed16de10daa07975309b950f -r c697235f08ca2def04797514547eec5c0365bfeb yt/frontends/halo_catalogs/subfind/data_structures.py
--- /dev/null
+++ b/yt/frontends/halo_catalogs/subfind/data_structures.py
@@ -0,0 +1,165 @@
+"""
+Data structures for Subfind frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+import stat
+import weakref
+import struct
+import glob
+import time
+import os
+
+from .fields import \
+ SubfindFieldInfo
+
+from yt.utilities.cosmology import Cosmology
+from yt.utilities.definitions import \
+ mpc_conversion, sec_conversion
+from yt.geometry.particle_geometry_handler import \
+ ParticleIndex
+from yt.data_objects.static_output import \
+ Dataset, \
+ ParticleFile
+from yt.frontends.sph.data_structures import \
+ _fix_unit_ordering
+import yt.utilities.fortran_utils as fpu
+from yt.units.yt_array import \
+ YTArray, \
+ YTQuantity
+
+class SubfindHDF5File(ParticleFile):
+ def __init__(self, pf, io, filename, file_id):
+ with h5py.File(filename, "r") as f:
+ self.header = dict((field, f.attrs[field]) \
+ for field in f.attrs.keys())
+
+ super(SubfindHDF5File, self).__init__(pf, io, filename, file_id)
+
+class SubfindDataset(Dataset):
+ _index_class = ParticleIndex
+ _file_class = SubfindHDF5File
+ _field_info_class = SubfindFieldInfo
+ _suffix = ".hdf5"
+
+ def __init__(self, filename, dataset_type="subfind_hdf5",
+ n_ref = 16, over_refine_factor = 1):
+ self.n_ref = n_ref
+ self.over_refine_factor = over_refine_factor
+ super(SubfindDataset, self).__init__(filename, dataset_type)
+
+ def _parse_parameter_file(self):
+ handle = h5py.File(self.parameter_filename, mode="r")
+ hvals = {}
+ hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
+ hvals["NumFiles"] = hvals["NumFilesPerSnapshot"]
+ hvals["Massarr"] = hvals["MassTable"]
+
+ self.dimensionality = 3
+ self.refine_by = 2
+ self.parameters["HydroMethod"] = "sph"
+ self.unique_identifier = \
+ int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+
+ # Set standard values
+ self.current_time = self.quan(hvals["Time_GYR"] * sec_conversion["Gyr"], "s")
+ self.domain_left_edge = np.zeros(3, "float64")
+ self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
+ nz = 1 << self.over_refine_factor
+ self.domain_dimensions = np.ones(3, "int32") * nz
+ self.cosmological_simulation = 1
+ self.periodicity = (True, True, True)
+ self.current_redshift = hvals["Redshift"]
+ self.omega_lambda = hvals["OmegaLambda"]
+ self.omega_matter = hvals["Omega0"]
+ self.hubble_constant = hvals["HubbleParam"]
+ self.parameters = hvals
+ prefix = os.path.abspath(
+ os.path.join(os.path.dirname(self.parameter_filename),
+ os.path.basename(self.parameter_filename).split(".", 1)[0]))
+
+ suffix = self.parameter_filename.rsplit(".", 1)[-1]
+ self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix)
+ self.file_count = len(glob.glob(prefix + "*" + self._suffix))
+ self.particle_types = ("FOF", "SUBFIND")
+ self.particle_types_raw = ("FOF", "SUBFIND")
+
+ # To avoid having to open files twice
+ self._unit_base = {}
+ self._unit_base.update(
+ (str(k), v) for k, v in handle["/Units"].attrs.items())
+ # Comoving cm is given in the Units
+ self._unit_base['cmcm'] = 1.0 / self._unit_base["UnitLength_in_cm"]
+ handle.close()
+
+ def _set_code_unit_attributes(self):
+ # Set a sane default for cosmological simulations.
+ if self._unit_base is None and self.cosmological_simulation == 1:
+ mylog.info("Assuming length units are in Mpc/h (comoving)")
+ self._unit_base = dict(length = (1.0, "Mpccm/h"))
+ # Any remaining values fall back to the standard Gadget
+ # defaults.
+ unit_base = self._unit_base or {}
+ if "length" in unit_base:
+ length_unit = unit_base["length"]
+ elif "UnitLength_in_cm" in unit_base:
+ if self.cosmological_simulation == 0:
+ length_unit = (unit_base["UnitLength_in_cm"], "cm")
+ else:
+ length_unit = (unit_base["UnitLength_in_cm"], "cmcm/h")
+ else:
+ raise RuntimeError
+ length_unit = _fix_unit_ordering(length_unit)
+ self.length_unit = self.quan(length_unit[0], length_unit[1])
+
+ unit_base = self._unit_base or {}
+ if "velocity" in unit_base:
+ velocity_unit = unit_base["velocity"]
+ elif "UnitVelocity_in_cm_per_s" in unit_base:
+ velocity_unit = (unit_base["UnitVelocity_in_cm_per_s"], "cm/s")
+ else:
+ velocity_unit = (1e5, "cm/s")
+ velocity_unit = _fix_unit_ordering(velocity_unit)
+ self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])
+ # We set hubble_constant = 1.0 for non-cosmology, so this is safe.
+ # Default to 1e10 Msun/h if mass is not specified.
+ if "mass" in unit_base:
+ mass_unit = unit_base["mass"]
+ elif "UnitMass_in_g" in unit_base:
+ if self.cosmological_simulation == 0:
+ mass_unit = (unit_base["UnitMass_in_g"], "g")
+ else:
+ mass_unit = (unit_base["UnitMass_in_g"], "g/h")
+ else:
+ # Sane default
+ mass_unit = (1.0, "1e10*Msun/h")
+ mass_unit = _fix_unit_ordering(mass_unit)
+ self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
+ self.time_unit = self.quan(unit_base["UnitTime_in_s"], "s")
+
+ @classmethod
+ def _is_valid(self, *args, **kwargs):
+ try:
+ fileh = h5py.File(args[0], mode='r')
+ if "Constants" in fileh["/"].keys() and \
+ "Header" in fileh["/"].keys() and \
+ "SUBFIND" in fileh["/"].keys():
+ fileh.close()
+ return True
+ fileh.close()
+ except:
+ pass
+ return False
diff -r 794b2beb2c48272fed16de10daa07975309b950f -r c697235f08ca2def04797514547eec5c0365bfeb yt/frontends/halo_catalogs/subfind/fields.py
--- /dev/null
+++ b/yt/frontends/halo_catalogs/subfind/fields.py
@@ -0,0 +1,48 @@
+"""
+Subfind-specific fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.funcs import mylog
+from yt.fields.field_info_container import \
+ FieldInfoContainer
+from yt.units.yt_array import \
+ YTArray
+
+from yt.utilities.physical_constants import \
+ mh, \
+ mass_sun_cgs
+
+m_units = "g"
+p_units = "cm"
+v_units = "cm / s"
+r_units = "cm"
+
+class SubfindFieldInfo(FieldInfoContainer):
+ known_other_fields = (
+ )
+
+ known_particle_fields = (
+ ("particle_identifier", ("", [], None)),
+ ("particle_position_x", (p_units, [], None)),
+ ("particle_position_y", (p_units, [], None)),
+ ("particle_position_z", (p_units, [], None)),
+ ("particle_velocity_x", (v_units, [], None)),
+ ("particle_velocity_y", (v_units, [], None)),
+ ("particle_velocity_z", (v_units, [], None)),
+ ("particle_mass", (m_units, [], "Virial Mass")),
+ ("virial_radius", (r_units, [], "Virial Radius")),
+)
diff -r 794b2beb2c48272fed16de10daa07975309b950f -r c697235f08ca2def04797514547eec5c0365bfeb yt/frontends/halo_catalogs/subfind/io.py
--- /dev/null
+++ b/yt/frontends/halo_catalogs/subfind/io.py
@@ -0,0 +1,123 @@
+"""
+Subfind data-file handling function
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+
+from yt.utilities.exceptions import *
+from yt.funcs import mylog
+
+from yt.utilities.io_handler import \
+ BaseIOHandler
+
+from yt.utilities.lib.geometry_utils import compute_morton
+
+from yt.geometry.oct_container import _ORDER_MAX
+
+class IOHandlerSubfindHDF5(BaseIOHandler):
+ _dataset_type = "subfind_hdf5"
+
+ def _read_fluid_selection(self, chunks, selector, fields, size):
+ raise NotImplementedError
+
+ def _read_particle_coords(self, chunks, ptf):
+ # This will read chunks and yield the results.
+ chunks = list(chunks)
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ with h5py.File(data_file.filename, "r") as f:
+ for ptype, field_list in sorted(ptf.items()):
+ pcount = f[ptype].attrs["Number_of_groups"]
+ coords = f[ptype]["CenterOfMass"].value.astype("float64")
+ coords = np.resize(coords, (pcount, 3))
+ x = coords[:, 0]
+ y = coords[:, 1]
+ z = coords[:, 2]
+ yield ptype, (x, y, z)
+
+ def _read_particle_fields(self, chunks, ptf, selector):
+ # Now we have all the sizes, and we can allocate
+ chunks = list(chunks)
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ with h5py.File(data_file.filename, "r") as f:
+ for ptype, field_list in sorted(ptf.items()):
+ pcount = f[ptype].attrs["Number_of_groups"]
+ coords = f[ptype]["CenterOfMass"].value.astype("float64")
+ coords = np.resize(coords, (pcount, 3))
+ x = coords[:, 0]
+ y = coords[:, 1]
+ z = coords[:, 2]
+ mask = selector.select_points(x, y, z)
+ del x, y, z
+ if mask is None: continue
+ for field in field_list:
+ data = f[ptype][field][mask].astype("float64")
+ yield (ptype, field), data
+
+ def _initialize_index(self, data_file, regions):
+ pcount = sum(self._count_particles(data_file).values())
+ morton = np.empty(pcount, dtype='uint64')
+ mylog.debug("Initializing index % 5i (% 7i particles)",
+ data_file.file_id, pcount)
+ ind = 0
+ with h5py.File(data_file.filename, "r") as f:
+ if not f.keys(): return None
+ dx = np.finfo(f["FOF"]['CenterOfMass'].dtype).eps
+ dx = 2.0*self.pf.quan(dx, "code_length")
+ for ptype in ["FOF", "SUBFIND"]:
+ my_pcount = f[ptype].attrs["Number_of_groups"]
+ pos = f[ptype]["CenterOfMass"].value.astype("float64")
+ pos = np.resize(pos, (my_pcount, 3))
+ pos = data_file.pf.arr(pos, "code_length")
+
+ # These are 32-bit numbers, so we give a little leeway.
+ # Otherwise, for big sets of particles, we will often bump into the
+ # domain edges. This helps alleviate that.
+ np.clip(pos, self.pf.domain_left_edge + dx,
+ self.pf.domain_right_edge - dx, pos)
+ if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
+ np.any(pos.max(axis=0) > self.pf.domain_right_edge):
+ raise YTDomainOverflow(pos.min(axis=0),
+ pos.max(axis=0),
+ self.pf.domain_left_edge,
+ self.pf.domain_right_edge)
+ regions.add_data_file(pos, data_file.file_id)
+ morton[ind:ind+pos.shape[0]] = compute_morton(
+ pos[:,0], pos[:,1], pos[:,2],
+ data_file.pf.domain_left_edge,
+ data_file.pf.domain_right_edge)
+ ind += pos.shape[0]
+ return morton
+
+ def _count_particles(self, data_file):
+ with h5py.File(data_file.filename, "r") as f:
+ return dict([(ptype, f[ptype].attrs["Number_of_groups"]) \
+ for ptype in "FOF", "SUBFIND"])
+
+ def _identify_fields(self, data_file):
+ with h5py.File(data_file.filename, "r") as f:
+ fields = []
+ for ptype in ["FOF", "SUBFIND"]:
+ for ax in "xyz":
+ fields.append((ptype, "particle_position_%s" % ax))
+ return fields, {}
diff -r 794b2beb2c48272fed16de10daa07975309b950f -r c697235f08ca2def04797514547eec5c0365bfeb yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -342,7 +342,8 @@
try:
fileh = h5py.File(args[0], mode='r')
if "Constants" in fileh["/"].keys() and \
- "Header" in fileh["/"].keys():
+ "Header" in fileh["/"].keys() and \
+ "SUBFIND" not in fileh["/"].keys():
fileh.close()
return True
fileh.close()
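
The detection logic above is what keeps this frontend and the Gadget/OWLS one mutually exclusive: the new SubfindDataset._is_valid claims HDF5 files whose root holds "Constants", "Header", and "SUBFIND" groups, while the matching change to yt/frontends/sph/data_structures.py makes the OWLS check reject any file containing "SUBFIND". A minimal standalone sketch of the same probe (the helper name is hypothetical, not part of yt):

    import h5py

    def looks_like_subfind(filename):
        # Hypothetical helper mirroring _is_valid above: a subfind halo
        # catalog has Constants, Header, and SUBFIND groups at the root.
        try:
            with h5py.File(filename, mode="r") as fileh:
                root = fileh["/"]
                return ("Constants" in root and
                        "Header" in root and
                        "SUBFIND" in root)
        except (IOError, OSError):
            # Not a readable HDF5 file, so not a subfind catalog.
            return False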
https://bitbucket.org/yt_analysis/yt/commits/923efa785f67/
Changeset: 923efa785f67
Branch: yt-3.0
User: brittonsmith
Date: 2014-04-13 21:06:51
Summary: Fields are now properly detected for subfind frontend.
Affected #: 3 files
diff -r c697235f08ca2def04797514547eec5c0365bfeb -r 923efa785f67c701ccf50fb0310a22bb1d32f61a yt/frontends/halo_catalogs/subfind/data_structures.py
--- a/yt/frontends/halo_catalogs/subfind/data_structures.py
+++ b/yt/frontends/halo_catalogs/subfind/data_structures.py
@@ -29,6 +29,8 @@
from yt.utilities.cosmology import Cosmology
from yt.utilities.definitions import \
mpc_conversion, sec_conversion
+from yt.utilities.exceptions import \
+ YTException
from yt.geometry.particle_geometry_handler import \
ParticleIndex
from yt.data_objects.static_output import \
@@ -70,7 +72,6 @@
self.dimensionality = 3
self.refine_by = 2
- self.parameters["HydroMethod"] = "sph"
self.unique_identifier = \
int(os.stat(self.parameter_filename)[stat.ST_CTIME])
@@ -94,6 +95,8 @@
suffix = self.parameter_filename.rsplit(".", 1)[-1]
self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix)
self.file_count = len(glob.glob(prefix + "*" + self._suffix))
+ if self.file_count == 0:
+ raise YTException(message="No data files found.", pf=self)
self.particle_types = ("FOF", "SUBFIND")
self.particle_types_raw = ("FOF", "SUBFIND")
diff -r c697235f08ca2def04797514547eec5c0365bfeb -r 923efa785f67c701ccf50fb0310a22bb1d32f61a yt/frontends/halo_catalogs/subfind/io.py
--- a/yt/frontends/halo_catalogs/subfind/io.py
+++ b/yt/frontends/halo_catalogs/subfind/io.py
@@ -112,12 +112,32 @@
def _count_particles(self, data_file):
with h5py.File(data_file.filename, "r") as f:
return dict([(ptype, f[ptype].attrs["Number_of_groups"]) \
- for ptype in "FOF", "SUBFIND"])
+ for ptype in self.pf.particle_types_raw])
def _identify_fields(self, data_file):
+ pcount = self._count_particles(data_file)
+ fields = []
with h5py.File(data_file.filename, "r") as f:
- fields = []
- for ptype in ["FOF", "SUBFIND"]:
- for ax in "xyz":
- fields.append((ptype, "particle_position_%s" % ax))
+ for ptype in self.pf.particle_types_raw:
+ fields.extend(h5_field_list(f[ptype], ptype, pcount[ptype]))
return fields, {}
+
+def h5_field_list(fh, ptype, pcount):
+ fields = []
+ for field in fh.keys():
+ if isinstance(fh[field], h5py.Group):
+ fields.extend(h5_field_list(fh[field], ptype, pcount))
+ else:
+ if fh[field].size % pcount:
+ pass
+ # mylog.warn("Cannot add field (%s, %s) with size %d for %d particles." % \
+ # (ptype, field, fh[field].size, pcount))
+ else:
+ my_div = fh[field].size / pcount
+ if my_div > 1:
+ for i in range(my_div):
+ fields.append((ptype, "%s_%d" % (field, i)))
+ else:
+ fields.append((ptype, field))
+ return fields
+
diff -r c697235f08ca2def04797514547eec5c0365bfeb -r 923efa785f67c701ccf50fb0310a22bb1d32f61a yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -118,7 +118,7 @@
# TODO: Add additional fields
pfl = []
units = {}
- for dom in self.data_files:
+ for dom in self.data_files[:1]:
fl, _units = self.io._identify_fields(dom)
units.update(_units)
dom._calculate_offsets(fl)
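
h5_field_list above is the core of the field detection: it walks a particle-type group recursively, accepts any dataset whose size is a whole multiple of the particle count, and splits an N-per-particle dataset into N suffixed fields (e.g. CenterOfMass becomes CenterOfMass_0 through CenterOfMass_2). A self-contained sketch of that walk, assuming an open h5py group and a known particle count (function name hypothetical):

    import h5py

    def discover_fields(group, pcount, prefix=""):
        # Hypothetical sketch of the recursive discovery pass above.
        fields = []
        for name, item in group.items():
            path = "%s/%s" % (prefix, name) if prefix else name
            if isinstance(item, h5py.Group):
                fields.extend(discover_fields(item, pcount, path))
            elif item.size % pcount == 0:
                n_comp = item.size // pcount
                if n_comp > 1:
                    # Vector dataset: expose one field per component.
                    fields.extend("%s_%d" % (path, i) for i in range(n_comp))
                else:
                    fields.append(path)
            # Datasets whose size is not a multiple of pcount are skipped;
            # later commits in this series handle them as offset fields.
        return fields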
https://bitbucket.org/yt_analysis/yt/commits/00b23bcba980/
Changeset: 00b23bcba980
Branch: yt-3.0
User: brittonsmith
Date: 2014-04-14 12:47:44
Summary: Fixing field reading.
Affected #: 1 file
diff -r 923efa785f67c701ccf50fb0310a22bb1d32f61a -r 00b23bcba980c2b20fff43426eba72f32bd09961 yt/frontends/halo_catalogs/subfind/io.py
--- a/yt/frontends/halo_catalogs/subfind/io.py
+++ b/yt/frontends/halo_catalogs/subfind/io.py
@@ -71,7 +71,17 @@
del x, y, z
if mask is None: continue
for field in field_list:
- data = f[ptype][field][mask].astype("float64")
+ if field in f[ptype].keys():
+ field_data = f[ptype][field].value.astype("float64")
+ else:
+ fname = field[:field.rfind("_")]
+ field_data = f[ptype][fname].value.astype("float64")
+ my_div = field_data.size / pcount
+ if my_div > 1:
+ field_data = np.resize(field_data, (pcount, my_div))
+ findex = int(field[field.rfind("_") + 1:])
+ field_data = field_data[:, findex]
+ data = field_data[mask]
yield (ptype, field), data
def _initialize_index(self, data_file, regions):
@@ -134,10 +144,10 @@
# (ptype, field, fh[field].size, pcount))
else:
my_div = fh[field].size / pcount
+ fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
if my_div > 1:
for i in range(my_div):
- fields.append((ptype, "%s_%d" % (field, i)))
+ fields.append((ptype, "%s_%d" % (fname, i)))
else:
- fields.append((ptype, field))
+ fields.append((ptype, fname))
return fields
-
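
The read path now mirrors the discovery pass: a suffixed name such as CenterOfMass_1 is not itself a dataset, so the reader strips the trailing _<index>, reads the flat dataset, reshapes it to (pcount, n_components), and keeps one column. A sketch of that slicing on a plain numpy array (helper name hypothetical):

    import numpy as np

    def read_component(flat_data, pcount, field):
        # field is a suffixed name such as "CenterOfMass_1"; flat_data is
        # the raw 1D dataset of length pcount * n_components.
        n_comp = flat_data.size // pcount
        data = flat_data.reshape(pcount, n_comp)
        findex = int(field[field.rfind("_") + 1:])
        return data[:, findex]

    # Example: 4 halos with 3-component centers of mass.
    flat = np.arange(12, dtype="float64")
    print(read_component(flat, 4, "CenterOfMass_1"))  # [ 1.  4.  7. 10.]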
https://bitbucket.org/yt_analysis/yt/commits/be24756d6c42/
Changeset: be24756d6c42
Branch: yt-3.0
User: brittonsmith
Date: 2014-04-14 13:26:59
Summary: Adding some field aliases.
Affected #: 1 file
diff -r 00b23bcba980c2b20fff43426eba72f32bd09961 -r be24756d6c42b6ac1771d29d5316b9cbcbe12951 yt/frontends/halo_catalogs/subfind/fields.py
--- a/yt/frontends/halo_catalogs/subfind/fields.py
+++ b/yt/frontends/halo_catalogs/subfind/fields.py
@@ -14,35 +14,40 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import numpy as np
-
from yt.funcs import mylog
from yt.fields.field_info_container import \
FieldInfoContainer
from yt.units.yt_array import \
YTArray
-from yt.utilities.physical_constants import \
- mh, \
- mass_sun_cgs
-
-m_units = "g"
-p_units = "cm"
-v_units = "cm / s"
-r_units = "cm"
+m_units = "1e10 * Msun/h"
+p_units = "Mpccm/h"
+v_units = "1e5 * cmcm / s"
class SubfindFieldInfo(FieldInfoContainer):
known_other_fields = (
)
known_particle_fields = (
- ("particle_identifier", ("", [], None)),
- ("particle_position_x", (p_units, [], None)),
- ("particle_position_y", (p_units, [], None)),
- ("particle_position_z", (p_units, [], None)),
- ("particle_velocity_x", (v_units, [], None)),
- ("particle_velocity_y", (v_units, [], None)),
- ("particle_velocity_z", (v_units, [], None)),
- ("particle_mass", (m_units, [], "Virial Mass")),
- ("virial_radius", (r_units, [], "Virial Radius")),
+ ("CenterOfMass_0", (p_units, ["particle_position_x"], None)),
+ ("CenterOfMass_1", (p_units, ["particle_position_y"], None)),
+ ("CenterOfMass_2", (p_units, ["particle_position_z"], None)),
+ ("CenterOfMassVelocity_0", (v_units, ["particle_velocity_x"], None)),
+ ("CenterOfMassVelocity_1", (v_units, ["particle_velocity_y"], None)),
+ ("CenterOfMassVelocity_2", (v_units, ["particle_velocity_z"], None)),
+ ("Mass", (m_units, ["particle_mass"], None)),
+ ("Halo_M_Crit200", (m_units, ["Virial Mass"], None)),
+ ("Halo_M_Crit2500", (m_units, [], None)),
+ ("Halo_M_Crit500", (m_units, [], None)),
+ ("Halo_M_Mean200", (m_units, [], None)),
+ ("Halo_M_Mean2500", (m_units, [], None)),
+ ("Halo_M_Mean500", (m_units, [], None)),
+ ("Halo_M_TopHat200", (m_units, [], None)),
+ ("Halo_R_Crit200", (p_units, ["Virial Radius"], None)),
+ ("Halo_R_Crit2500", (p_units, [], None)),
+ ("Halo_R_Crit500", (p_units, [], None)),
+ ("Halo_R_Mean200", (p_units, [], None)),
+ ("Halo_R_Mean2500", (p_units, [], None)),
+ ("Halo_R_Mean500", (p_units, [], None)),
+ ("Halo_R_TopHat200", (p_units, [], None)),
)
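
The tuples above follow yt's FieldInfoContainer convention of (on-disk name, (units, aliases, display name)); the alias list is what lets generic yt machinery resolve names like particle_position_x to the component fields produced by the splitting logic. An illustrative fragment of that shape (entries abridged from the diff above):

    # (on-disk name, (units, [aliases], display name))
    known_particle_fields = (
        ("CenterOfMass_0",  ("Mpccm/h",        ["particle_position_x"], None)),
        ("Mass",            ("1e10 * Msun/h",  ["particle_mass"],       None)),
        ("Halo_M_Crit2500", ("1e10 * Msun/h",  [],                      None)),
    )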
https://bitbucket.org/yt_analysis/yt/commits/860668183bb6/
Changeset: 860668183bb6
Branch: yt-3.0
User: brittonsmith
Date: 2014-04-29 18:24:14
Summary: Adding a means to calculate particle index offsets.
Affected #: 2 files
diff -r be24756d6c42b6ac1771d29d5316b9cbcbe12951 -r 860668183bb6dda273d5871cf13c9c76469cbb03 yt/frontends/halo_catalogs/subfind/data_structures.py
--- a/yt/frontends/halo_catalogs/subfind/data_structures.py
+++ b/yt/frontends/halo_catalogs/subfind/data_structures.py
@@ -14,6 +14,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
+from collections import defaultdict
import h5py
import numpy as np
import stat
@@ -42,6 +43,22 @@
from yt.units.yt_array import \
YTArray, \
YTQuantity
+
+class SubfindParticleIndex(ParticleIndex):
+ def __init__(self, pf, dataset_type):
+ super(SubfindParticleIndex, self).__init__(pf, dataset_type)
+
+ def _calculate_particle_index_offsets(self):
+ particle_count = defaultdict(int)
+ for data_file in self.data_files:
+ data_file.index_offset = dict([(ptype, particle_count[ptype]) for
+ ptype in data_file.particle_count])
+ for ptype in data_file.particle_count:
+ particle_count[ptype] += data_file.particle_count[ptype]
+
+ def _setup_geometry(self):
+ super(SubfindParticleIndex, self)._setup_geometry()
+ self._calculate_particle_index_offsets()
class SubfindHDF5File(ParticleFile):
def __init__(self, pf, io, filename, file_id):
@@ -52,7 +69,7 @@
super(SubfindHDF5File, self).__init__(pf, io, filename, file_id)
class SubfindDataset(Dataset):
- _index_class = ParticleIndex
+ _index_class = SubfindParticleIndex
_file_class = SubfindHDF5File
_field_info_class = SubfindFieldInfo
_suffix = ".hdf5"
diff -r be24756d6c42b6ac1771d29d5316b9cbcbe12951 -r 860668183bb6dda273d5871cf13c9c76469cbb03 yt/frontends/halo_catalogs/subfind/io.py
--- a/yt/frontends/halo_catalogs/subfind/io.py
+++ b/yt/frontends/halo_catalogs/subfind/io.py
@@ -121,8 +121,10 @@
def _count_particles(self, data_file):
with h5py.File(data_file.filename, "r") as f:
- return dict([(ptype, f[ptype].attrs["Number_of_groups"]) \
- for ptype in self.pf.particle_types_raw])
+ data_file.particle_count = \
+ dict([(ptype, f[ptype].attrs["Number_of_groups"])
+ for ptype in self.pf.particle_types_raw])
+ return data_file.particle_count
def _identify_fields(self, data_file):
pcount = self._count_particles(data_file)
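
Since halo indices are not stored on disk, global identifiers have to be assigned by a prefix sum over the per-file counts: file 0 owns indices [0, N_0), file 1 owns [N_0, N_0 + N_1), and so on. A self-contained sketch of that bookkeeping, with plain dicts standing in for the data_file objects:

    from collections import defaultdict

    def index_starts(per_file_counts):
        # per_file_counts: one {ptype: count} dict per file, in file order.
        # Returns a parallel list of {ptype: first_global_index} dicts.
        running = defaultdict(int)
        starts = []
        for counts in per_file_counts:
            starts.append(dict((ptype, running[ptype]) for ptype in counts))
            for ptype, n in counts.items():
                running[ptype] += n
        return starts

    counts = [{"FOF": 10, "SUBFIND": 4}, {"FOF": 7, "SUBFIND": 9}]
    print(index_starts(counts))
    # [{'FOF': 0, 'SUBFIND': 0}, {'FOF': 10, 'SUBFIND': 4}]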
https://bitbucket.org/yt_analysis/yt/commits/bea2455c1a42/
Changeset: bea2455c1a42
Branch: yt-3.0
User: brittonsmith
Date: 2014-04-29 22:26:44
Summary: Particle id field now works.
Affected #: 1 file
diff -r 860668183bb6dda273d5871cf13c9c76469cbb03 -r bea2455c1a4251e9821430b5c8bfddb9c7b99c36 yt/frontends/halo_catalogs/subfind/io.py
--- a/yt/frontends/halo_catalogs/subfind/io.py
+++ b/yt/frontends/halo_catalogs/subfind/io.py
@@ -71,7 +71,11 @@
del x, y, z
if mask is None: continue
for field in field_list:
- if field in f[ptype].keys():
+ if field == "particle_identifier":
+ field_data = \
+ np.arange(data_file.particle_count[ptype]) + \
+ data_file.index_offset[ptype]
+ elif field in f[ptype].keys():
field_data = f[ptype][field].value.astype("float64")
else:
fname = field[:field.rfind("_")]
@@ -128,7 +132,8 @@
def _identify_fields(self, data_file):
pcount = self._count_particles(data_file)
- fields = []
+ fields = [(ptype, "particle_identifier")
+ for ptype in self.pf.particle_types_raw]
with h5py.File(data_file.filename, "r") as f:
for ptype in self.pf.particle_types_raw:
fields.extend(h5_field_list(f[ptype], ptype, pcount[ptype]))
https://bitbucket.org/yt_analysis/yt/commits/198c6c02ef00/
Changeset: 198c6c02ef00
Branch: yt-3.0
User: brittonsmith
Date: 2014-05-01 17:23:17
Summary: Ok, now I finally understand how this works. We can now load all halo fields except the Halo_M_Crit200-type fields, which correspond to halos in other files.
Affected #: 2 files
diff -r bea2455c1a4251e9821430b5c8bfddb9c7b99c36 -r 198c6c02ef0010eb151fcd39887f3230dc5d6089 yt/frontends/halo_catalogs/subfind/data_structures.py
--- a/yt/frontends/halo_catalogs/subfind/data_structures.py
+++ b/yt/frontends/halo_catalogs/subfind/data_structures.py
@@ -52,9 +52,9 @@
particle_count = defaultdict(int)
for data_file in self.data_files:
data_file.index_offset = dict([(ptype, particle_count[ptype]) for
- ptype in data_file.particle_count])
- for ptype in data_file.particle_count:
- particle_count[ptype] += data_file.particle_count[ptype]
+ ptype in data_file.total_particles])
+ for ptype in data_file.total_particles:
+ particle_count[ptype] += data_file.total_particles[ptype]
def _setup_geometry(self):
super(SubfindParticleIndex, self)._setup_geometry()
@@ -62,11 +62,10 @@
class SubfindHDF5File(ParticleFile):
def __init__(self, pf, io, filename, file_id):
+ super(SubfindHDF5File, self).__init__(pf, io, filename, file_id)
with h5py.File(filename, "r") as f:
self.header = dict((field, f.attrs[field]) \
for field in f.attrs.keys())
-
- super(SubfindHDF5File, self).__init__(pf, io, filename, file_id)
class SubfindDataset(Dataset):
_index_class = SubfindParticleIndex
diff -r bea2455c1a4251e9821430b5c8bfddb9c7b99c36 -r 198c6c02ef0010eb151fcd39887f3230dc5d6089 yt/frontends/halo_catalogs/subfind/io.py
--- a/yt/frontends/halo_catalogs/subfind/io.py
+++ b/yt/frontends/halo_catalogs/subfind/io.py
@@ -61,7 +61,7 @@
for data_file in data_files:
with h5py.File(data_file.filename, "r") as f:
for ptype, field_list in sorted(ptf.items()):
- pcount = f[ptype].attrs["Number_of_groups"]
+ pcount = data_file.total_particles[ptype]
coords = f[ptype]["CenterOfMass"].value.astype("float64")
coords = np.resize(coords, (pcount, 3))
x = coords[:, 0]
@@ -73,7 +73,7 @@
for field in field_list:
if field == "particle_identifier":
field_data = \
- np.arange(data_file.particle_count[ptype]) + \
+ np.arange(data_file.total_particles[ptype]) + \
data_file.index_offset[ptype]
elif field in f[ptype].keys():
field_data = f[ptype][field].value.astype("float64")
@@ -98,8 +98,10 @@
if not f.keys(): return None
dx = np.finfo(f["FOF"]['CenterOfMass'].dtype).eps
dx = 2.0*self.pf.quan(dx, "code_length")
- for ptype in ["FOF", "SUBFIND"]:
- my_pcount = f[ptype].attrs["Number_of_groups"]
+
+ for ptype, pattr in zip(["FOF", "SUBFIND"],
+ ["Number_of_groups", "Number_of_subgroups"]):
+ my_pcount = f[ptype].attrs[pattr]
pos = f[ptype]["CenterOfMass"].value.astype("float64")
pos = np.resize(pos, (my_pcount, 3))
pos = data_file.pf.arr(pos, "code_length")
@@ -125,36 +127,41 @@
def _count_particles(self, data_file):
with h5py.File(data_file.filename, "r") as f:
- data_file.particle_count = \
- dict([(ptype, f[ptype].attrs["Number_of_groups"])
- for ptype in self.pf.particle_types_raw])
- return data_file.particle_count
+ return {"FOF": f["FOF"].attrs["Number_of_groups"],
+ "SUBFIND": f["FOF"].attrs["Number_of_subgroups"]}
def _identify_fields(self, data_file):
- pcount = self._count_particles(data_file)
fields = [(ptype, "particle_identifier")
for ptype in self.pf.particle_types_raw]
+ pcount = data_file.total_particles
with h5py.File(data_file.filename, "r") as f:
for ptype in self.pf.particle_types_raw:
- fields.extend(h5_field_list(f[ptype], ptype, pcount[ptype]))
+ fields.extend(h5_field_list(f[ptype], ptype,
+ data_file.total_particles))
return fields, {}
def h5_field_list(fh, ptype, pcount):
fields = []
for field in fh.keys():
- if isinstance(fh[field], h5py.Group):
+ if "PartType" in field:
+ # These are halo member particles
+ continue
+ elif isinstance(fh[field], h5py.Group):
fields.extend(h5_field_list(fh[field], ptype, pcount))
else:
- if fh[field].size % pcount:
- pass
- # mylog.warn("Cannot add field (%s, %s) with size %d for %d particles." % \
- # (ptype, field, fh[field].size, pcount))
- else:
- my_div = fh[field].size / pcount
+ if not fh[field].size % pcount[ptype]:
+ my_div = fh[field].size / pcount[ptype]
fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
if my_div > 1:
for i in range(my_div):
fields.append((ptype, "%s_%d" % (fname, i)))
else:
fields.append((ptype, fname))
+ else:
+ # Some fields correspond to halos in other files.
+ # I don't know how to deal with that yet.
+ # mylog.warn("Cannot add field (%s, %s) with size %d." % \
+ # (ptype, fh[field].name, fh[field].size))
+ continue
+
return fields
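
With the counts straightened out, both particle-type sizes come from attributes on the FOF group, and the "PartType" groups (halo member particles) are skipped entirely during field detection. A short sketch of the counting (helper name hypothetical; attribute names as in the diff):

    import h5py

    def count_halos(filename):
        # Per-type halo counts, as in _count_particles above.
        with h5py.File(filename, "r") as f:
            return {"FOF": int(f["FOF"].attrs["Number_of_groups"]),
                    "SUBFIND": int(f["FOF"].attrs["Number_of_subgroups"])}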
https://bitbucket.org/yt_analysis/yt/commits/df872b035862/
Changeset: df872b035862
Branch: yt-3.0
User: brittonsmith
Date: 2014-05-01 17:49:43
Summary: Now we are detecting all the fields, including the special cases.
Affected #: 1 file
diff -r 198c6c02ef0010eb151fcd39887f3230dc5d6089 -r df872b035862e7bb4e4ba197302b63608f7f6ce9 yt/frontends/halo_catalogs/subfind/io.py
--- a/yt/frontends/halo_catalogs/subfind/io.py
+++ b/yt/frontends/halo_catalogs/subfind/io.py
@@ -136,18 +136,18 @@
pcount = data_file.total_particles
with h5py.File(data_file.filename, "r") as f:
for ptype in self.pf.particle_types_raw:
- fields.extend(h5_field_list(f[ptype], ptype,
- data_file.total_particles))
+ fields.extend(subfind_field_list(f[ptype], ptype,
+ data_file.total_particles))
return fields, {}
-def h5_field_list(fh, ptype, pcount):
+def subfind_field_list(fh, ptype, pcount):
fields = []
for field in fh.keys():
if "PartType" in field:
# These are halo member particles
continue
elif isinstance(fh[field], h5py.Group):
- fields.extend(h5_field_list(fh[field], ptype, pcount))
+ fields.extend(subfind_field_list(fh[field], ptype, pcount))
else:
if not fh[field].size % pcount[ptype]:
my_div = fh[field].size / pcount[ptype]
@@ -157,11 +157,22 @@
fields.append((ptype, "%s_%d" % (fname, i)))
else:
fields.append((ptype, fname))
+ elif ptype == "SUBFIND" and \
+ not fh[field].size % fh["/SUBFIND"].attrs["Number_of_groups"]:
+ # These are actually FOF fields, but they were written after
+ # a load balancing step moved halos around and thus they do not
+ # correspond to the halos stored in the FOF group.
+ my_div = fh[field].size / fh["/SUBFIND"].attrs["Number_of_groups"]
+ fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
+ if my_div > 1:
+ for i in range(my_div):
+ fields.append(("FOF", "%s_%d" % (fname, i)))
+ else:
+ fields.append(("FOF", fname))
+
else:
- # Some fields correspond to halos in other files.
- # I don't know how to deal with that yet.
- # mylog.warn("Cannot add field (%s, %s) with size %d." % \
- # (ptype, fh[field].name, fh[field].size))
+ mylog.warn("Cannot add field (%s, %s) with size %d." % \
+ (ptype, fh[field].name, fh[field].size))
continue
return fields
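
The special case above rests on a divisibility test: a dataset under SUBFIND whose size is not a multiple of the subgroup count, but is a multiple of the SUBFIND group's "Number_of_groups" attribute, is really a FOF field written after the load-balancing step. A sketch of that classification (helper and argument names hypothetical):

    def classify_dataset(size, n_subfind, n_groups_after):
        # n_subfind: subgroups in this file; n_groups_after: the SUBFIND
        # group's Number_of_groups attribute (halos after load balancing).
        if size % n_subfind == 0:
            return "SUBFIND", size // n_subfind
        if size % n_groups_after == 0:
            # Written after load balancing: really a FOF field.
            return "FOF", size // n_groups_after
        return None, 0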
https://bitbucket.org/yt_analysis/yt/commits/7ce7a1cdace6/
Changeset: 7ce7a1cdace6
Branch: yt-3.0
User: brittonsmith
Date: 2014-05-01 17:51:20
Summary: Fixing particle count for SUBFIND particles.
Affected #: 1 file
diff -r df872b035862e7bb4e4ba197302b63608f7f6ce9 -r 7ce7a1cdace6149a3765602a1e52de7701a3bfc0 yt/frontends/halo_catalogs/subfind/io.py
--- a/yt/frontends/halo_catalogs/subfind/io.py
+++ b/yt/frontends/halo_catalogs/subfind/io.py
@@ -43,7 +43,7 @@
for data_file in data_files:
with h5py.File(data_file.filename, "r") as f:
for ptype, field_list in sorted(ptf.items()):
- pcount = f[ptype].attrs["Number_of_groups"]
+ pcount = data_file.total_particles[ptype]
coords = f[ptype]["CenterOfMass"].value.astype("float64")
coords = np.resize(coords, (pcount, 3))
x = coords[:, 0]
https://bitbucket.org/yt_analysis/yt/commits/cb51cff93552/
Changeset: cb51cff93552
Branch: yt-3.0
User: brittonsmith
Date: 2014-05-01 18:29:38
Summary: Now detecting and storing list of special fields.
Affected #: 1 file
diff -r 7ce7a1cdace6149a3765602a1e52de7701a3bfc0 -r cb51cff93552ddd3f44ea76d72503abfe0a253ff yt/frontends/halo_catalogs/subfind/io.py
--- a/yt/frontends/halo_catalogs/subfind/io.py
+++ b/yt/frontends/halo_catalogs/subfind/io.py
@@ -30,6 +30,10 @@
class IOHandlerSubfindHDF5(BaseIOHandler):
_dataset_type = "subfind_hdf5"
+ def __init__(self, pf):
+ super(IOHandlerSubfindHDF5, self).__init__(pf)
+ self.special_fields = set([])
+
def _read_fluid_selection(self, chunks, selector, fields, size):
raise NotImplementedError
@@ -136,18 +140,24 @@
pcount = data_file.total_particles
with h5py.File(data_file.filename, "r") as f:
for ptype in self.pf.particle_types_raw:
- fields.extend(subfind_field_list(f[ptype], ptype,
- data_file.total_particles))
+ my_fields, my_special_fields = \
+ subfind_field_list(f[ptype], ptype, data_file.total_particles)
+ fields.extend(my_fields)
+ self.special_fields = self.special_fields.union(set(my_special_fields))
return fields, {}
def subfind_field_list(fh, ptype, pcount):
fields = []
+ special_fields = []
for field in fh.keys():
if "PartType" in field:
# These are halo member particles
continue
elif isinstance(fh[field], h5py.Group):
- fields.extend(subfind_field_list(fh[field], ptype, pcount))
+ my_fields, my_special_fields = \
+ subfind_field_list(fh[field], ptype, pcount)
+ fields.extend(my_fields)
+ my_special_fields.extend(special_fields)
else:
if not fh[field].size % pcount[ptype]:
my_div = fh[field].size / pcount[ptype]
@@ -169,10 +179,9 @@
fields.append(("FOF", "%s_%d" % (fname, i)))
else:
fields.append(("FOF", fname))
-
+ special_fields.append(fname)
else:
mylog.warn("Cannot add field (%s, %s) with size %d." % \
(ptype, fh[field].name, fh[field].size))
continue
-
- return fields
+ return fields, special_fields
https://bitbucket.org/yt_analysis/yt/commits/935fdb6bcbb3/
Changeset: 935fdb6bcbb3
Branch: yt-3.0
User: brittonsmith
Date: 2014-05-02 12:43:37
Summary: Now we have a map to the locations of the offset particles.
Affected #: 2 files
diff -r cb51cff93552ddd3f44ea76d72503abfe0a253ff -r 935fdb6bcbb3b1dfdd5b6a828428af3db653ed68 yt/frontends/halo_catalogs/subfind/data_structures.py
--- a/yt/frontends/halo_catalogs/subfind/data_structures.py
+++ b/yt/frontends/halo_catalogs/subfind/data_structures.py
@@ -49,16 +49,36 @@
super(SubfindParticleIndex, self).__init__(pf, dataset_type)
def _calculate_particle_index_offsets(self):
+ # Halo indices are not saved in the file, so we must count by hand.
+ # File 0 has halos 0 to N_0 - 1, file 1 has halos N_0 to N_0 + N_1 - 1, etc.
particle_count = defaultdict(int)
for data_file in self.data_files:
data_file.index_offset = dict([(ptype, particle_count[ptype]) for
ptype in data_file.total_particles])
for ptype in data_file.total_particles:
particle_count[ptype] += data_file.total_particles[ptype]
+
+ def _calculate_file_offset_map(self):
+ # After the FOF is performed, a load-balancing step redistributes halos
+ # and then writes more fields. Here, for each file, we create a list of
+ # files which contain the rest of the redistributed particles.
+ ifof = np.array([data_file.total_particles["FOF"]
+ for data_file in self.data_files])
+ isub = np.array([data_file.total_offset
+ for data_file in self.data_files])
+ subend = isub.cumsum()
+ fofend = ifof.cumsum()
+ istart = np.digitize(fofend - ifof[0], subend - isub[0], right=False) - 1
+ iend = np.digitize(fofend, subend, right=True)
+ for i, data_file in enumerate(self.data_files):
+ data_file.offset_files = \
+ [self.parameter_file.filename_template % {'num':ind}
+ for ind in range(istart[i], iend[i] + 1)]
def _setup_geometry(self):
super(SubfindParticleIndex, self)._setup_geometry()
self._calculate_particle_index_offsets()
+ self._calculate_file_offset_map()
class SubfindHDF5File(ParticleFile):
def __init__(self, pf, io, filename, file_id):
diff -r cb51cff93552ddd3f44ea76d72503abfe0a253ff -r 935fdb6bcbb3b1dfdd5b6a828428af3db653ed68 yt/frontends/halo_catalogs/subfind/io.py
--- a/yt/frontends/halo_catalogs/subfind/io.py
+++ b/yt/frontends/halo_catalogs/subfind/io.py
@@ -32,7 +32,7 @@
def __init__(self, pf):
super(IOHandlerSubfindHDF5, self).__init__(pf)
- self.special_fields = set([])
+ self.offset_fields = set([])
def _read_fluid_selection(self, chunks, selector, fields, size):
raise NotImplementedError
@@ -75,20 +75,23 @@
del x, y, z
if mask is None: continue
for field in field_list:
- if field == "particle_identifier":
- field_data = \
- np.arange(data_file.total_particles[ptype]) + \
- data_file.index_offset[ptype]
- elif field in f[ptype].keys():
- field_data = f[ptype][field].value.astype("float64")
+ if field in self.offset_fields:
+ raise RuntimeError
else:
- fname = field[:field.rfind("_")]
- field_data = f[ptype][fname].value.astype("float64")
- my_div = field_data.size / pcount
- if my_div > 1:
- field_data = np.resize(field_data, (pcount, my_div))
- findex = int(field[field.rfind("_") + 1:])
- field_data = field_data[:, findex]
+ if field == "particle_identifier":
+ field_data = \
+ np.arange(data_file.total_particles[ptype]) + \
+ data_file.index_offset[ptype]
+ elif field in f[ptype].keys():
+ field_data = f[ptype][field].value.astype("float64")
+ else:
+ fname = field[:field.rfind("_")]
+ field_data = f[ptype][fname].value.astype("float64")
+ my_div = field_data.size / pcount
+ if my_div > 1:
+ field_data = np.resize(field_data, (pcount, my_div))
+ findex = int(field[field.rfind("_") + 1:])
+ field_data = field_data[:, findex]
data = field_data[mask]
yield (ptype, field), data
@@ -131,6 +134,8 @@
def _count_particles(self, data_file):
with h5py.File(data_file.filename, "r") as f:
+ # We need this to figure out where the offset fields are stored.
+ data_file.total_offset = f["SUBFIND"].attrs["Number_of_groups"]
return {"FOF": f["FOF"].attrs["Number_of_groups"],
"SUBFIND": f["FOF"].attrs["Number_of_subgroups"]}
@@ -140,24 +145,24 @@
pcount = data_file.total_particles
with h5py.File(data_file.filename, "r") as f:
for ptype in self.pf.particle_types_raw:
- my_fields, my_special_fields = \
+ my_fields, my_offset_fields = \
subfind_field_list(f[ptype], ptype, data_file.total_particles)
fields.extend(my_fields)
- self.special_fields = self.special_fields.union(set(my_special_fields))
+ self.offset_fields = self.offset_fields.union(set(my_offset_fields))
return fields, {}
def subfind_field_list(fh, ptype, pcount):
fields = []
- special_fields = []
+ offset_fields = []
for field in fh.keys():
if "PartType" in field:
# These are halo member particles
continue
elif isinstance(fh[field], h5py.Group):
- my_fields, my_special_fields = \
+ my_fields, my_offset_fields = \
subfind_field_list(fh[field], ptype, pcount)
fields.extend(my_fields)
- my_special_fields.extend(special_fields)
+ my_offset_fields.extend(offset_fields)
else:
if not fh[field].size % pcount[ptype]:
my_div = fh[field].size / pcount[ptype]
@@ -179,9 +184,9 @@
fields.append(("FOF", "%s_%d" % (fname, i)))
else:
fields.append(("FOF", fname))
- special_fields.append(fname)
+ offset_fields.append(fname)
else:
mylog.warn("Cannot add field (%s, %s) with size %d." % \
(ptype, fh[field].name, fh[field].size))
continue
- return fields, special_fields
+ return fields, offset_fields
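
The offset-file map answers, for each file, which files hold the post-load-balancing ("offset") rows for its FOF halos. Both sides are described by cumulative sums of per-file counts, so the overlap can be found with a sorted search; a self-contained sketch using searchsorted rather than digitize for clarity (helper name hypothetical):

    import numpy as np

    def offset_file_ranges(fof_counts, offset_counts):
        # For each file i, return (first, last) indices of the files whose
        # offset blocks overlap file i's range of global FOF halo indices.
        fof_counts = np.asarray(fof_counts)
        offset_counts = np.asarray(offset_counts)
        fof_end = fof_counts.cumsum()       # one past each file's last halo
        fof_start = fof_end - fof_counts    # each file's first halo
        off_end = offset_counts.cumsum()
        # The file holding global index g is the first j with off_end[j] > g.
        first = np.searchsorted(off_end, fof_start, side="right")
        last = np.searchsorted(off_end, fof_end - 1, side="right")
        return [(int(a), int(b)) for a, b in zip(first, last)]

    print(offset_file_ranges([10, 7, 5], [6, 6, 10]))
    # [(0, 1), (1, 2), (2, 2)]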
https://bitbucket.org/yt_analysis/yt/commits/4f6f5cf77fd5/
Changeset: 4f6f5cf77fd5
Branch: yt-3.0
User: brittonsmith
Date: 2014-05-02 16:52:46
Summary: Offset fields now work.
Affected #: 2 files
diff -r 935fdb6bcbb3b1dfdd5b6a828428af3db653ed68 -r 4f6f5cf77fd5bb6feb33c91be233df7413764da8 yt/frontends/halo_catalogs/subfind/data_structures.py
--- a/yt/frontends/halo_catalogs/subfind/data_structures.py
+++ b/yt/frontends/halo_catalogs/subfind/data_structures.py
@@ -48,15 +48,18 @@
def __init__(self, pf, dataset_type):
super(SubfindParticleIndex, self).__init__(pf, dataset_type)
- def _calculate_particle_index_offsets(self):
+ def _calculate_particle_index_starts(self):
# Halo indices are not saved in the file, so we must count by hand.
# File 0 has halos 0 to N_0 - 1, file 1 has halos N_0 to N_0 + N_1 - 1, etc.
particle_count = defaultdict(int)
+ offset_count = 0
for data_file in self.data_files:
- data_file.index_offset = dict([(ptype, particle_count[ptype]) for
+ data_file.index_start = dict([(ptype, particle_count[ptype]) for
ptype in data_file.total_particles])
+ data_file.offset_start = offset_count
for ptype in data_file.total_particles:
particle_count[ptype] += data_file.total_particles[ptype]
+ offset_count += data_file.total_offset
def _calculate_file_offset_map(self):
# After the FOF is performed, a load-balancing step redistributes halos
@@ -68,16 +71,14 @@
for data_file in self.data_files])
subend = isub.cumsum()
fofend = ifof.cumsum()
- istart = np.digitize(fofend - ifof[0], subend - isub[0], right=False) - 1
- iend = np.digitize(fofend, subend, right=True)
+ istart = np.digitize(fofend - ifof, subend - isub) - 1
+ iend = np.clip(np.digitize(fofend, subend), 0, ifof.size - 2)
for i, data_file in enumerate(self.data_files):
- data_file.offset_files = \
- [self.parameter_file.filename_template % {'num':ind}
- for ind in range(istart[i], iend[i] + 1)]
+ data_file.offset_files = self.data_files[istart[i]: iend[i] + 1]
def _setup_geometry(self):
super(SubfindParticleIndex, self)._setup_geometry()
- self._calculate_particle_index_offsets()
+ self._calculate_particle_index_starts()
self._calculate_file_offset_map()
class SubfindHDF5File(ParticleFile):
diff -r 935fdb6bcbb3b1dfdd5b6a828428af3db653ed68 -r 4f6f5cf77fd5bb6feb33c91be233df7413764da8 yt/frontends/halo_catalogs/subfind/io.py
--- a/yt/frontends/halo_catalogs/subfind/io.py
+++ b/yt/frontends/halo_catalogs/subfind/io.py
@@ -55,6 +55,22 @@
z = coords[:, 2]
yield ptype, (x, y, z)
+ def _read_offset_particle_field(self, field, data_file, fh):
+ field_data = np.empty(data_file.total_particles["FOF"], dtype="float64")
+ fofindex = np.arange(data_file.total_particles["FOF"]) + data_file.index_start["FOF"]
+ for offset_file in data_file.offset_files:
+ if fh.filename == offset_file.filename:
+ ofh = fh
+ else:
+ ofh = h5py.File(offset_file.filename, "r")
+ subindex = np.arange(offset_file.total_offset) + offset_file.offset_start
+ substart = max(fofindex[0] - subindex[0], 0)
+ subend = min(fofindex[-1] - subindex[0], subindex.size - 1)
+ fofstart = substart + subindex[0] - fofindex[0]
+ fofend = subend + subindex[0] - fofindex[0]
+ field_data[fofstart:fofend + 1] = ofh["SUBFIND"][field][substart:subend + 1]
+ return field_data
+
def _read_particle_fields(self, chunks, ptf, selector):
# Now we have all the sizes, and we can allocate
chunks = list(chunks)
@@ -76,12 +92,13 @@
if mask is None: continue
for field in field_list:
if field in self.offset_fields:
- raise RuntimeError
+ field_data = \
+ self._read_offset_particle_field(field, data_file, f)
else:
if field == "particle_identifier":
field_data = \
np.arange(data_file.total_particles[ptype]) + \
- data_file.index_offset[ptype]
+ data_file.index_start[ptype]
elif field in f[ptype].keys():
field_data = f[ptype][field].value.astype("float64")
else:
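
_read_offset_particle_field above assembles a FOF-ordered array by intersecting this file's global FOF index range with each offset file's global range and copying the overlap. The interval arithmetic in isolation, on plain arrays (helper name hypothetical):

    import numpy as np

    def stitch_offset_field(fof_start, fof_count, blocks):
        # blocks: (global_start, values) pairs for each offset file, in order.
        out = np.empty(fof_count, dtype="float64")
        fof_end = fof_start + fof_count
        for block_start, values in blocks:
            block_end = block_start + len(values)
            lo = max(fof_start, block_start)   # overlap of the two ranges
            hi = min(fof_end, block_end)
            if lo >= hi:
                continue
            out[lo - fof_start:hi - fof_start] = \
                values[lo - block_start:hi - block_start]
        return out

    blocks = [(0, np.arange(6.0)), (6, np.arange(6.0, 12.0))]
    print(stitch_offset_field(4, 5, blocks))   # halos 4..8 -> [4. 5. 6. 7. 8.]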
https://bitbucket.org/yt_analysis/yt/commits/686df27ff094/
Changeset: 686df27ff094
Branch: yt-3.0
User: brittonsmith
Date: 2014-05-02 20:27:11
Summary: Fixing how fields are checked to be in files.
Affected #: 1 file
diff -r 4f6f5cf77fd5bb6feb33c91be233df7413764da8 -r 686df27ff094cbc08d1341aabbf526acec4d9d71 yt/frontends/halo_catalogs/subfind/io.py
--- a/yt/frontends/halo_catalogs/subfind/io.py
+++ b/yt/frontends/halo_catalogs/subfind/io.py
@@ -99,7 +99,7 @@
field_data = \
np.arange(data_file.total_particles[ptype]) + \
data_file.index_start[ptype]
- elif field in f[ptype].keys():
+ elif field in f[ptype]:
field_data = f[ptype][field].value.astype("float64")
else:
fname = field[:field.rfind("_")]
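
The one-character change matters because "field in f[ptype]" uses h5py's own membership test, which also resolves nested paths, whereas the old "field in f[ptype].keys()" only matched top-level names. For example (filename hypothetical):

    import h5py

    with h5py.File("catalog.0.hdf5", "r") as f:
        "Mass" in f["FOF"]               # True if the dataset exists
        "Stars/Mass" in f["FOF"]         # also works: nested path membership
        "Stars/Mass" in f["FOF"].keys()  # False: keys() holds only "Stars"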
https://bitbucket.org/yt_analysis/yt/commits/e8500243bf20/
Changeset: e8500243bf20
Branch: yt-3.0
User: brittonsmith
Date: 2014-05-02 20:43:24
Summary: Adding some more known fields.
Affected #: 1 file
diff -r 686df27ff094cbc08d1341aabbf526acec4d9d71 -r e8500243bf20f67ba724f42677416d8e45f770e5 yt/frontends/halo_catalogs/subfind/fields.py
--- a/yt/frontends/halo_catalogs/subfind/fields.py
+++ b/yt/frontends/halo_catalogs/subfind/fields.py
@@ -20,7 +20,8 @@
from yt.units.yt_array import \
YTArray
-m_units = "1e10 * Msun/h"
+m_units = "code_mass"
+mdot_units = "code_mass / code_time"
p_units = "Mpccm/h"
v_units = "1e5 * cmcm / s"
@@ -50,4 +51,8 @@
("Halo_R_Mean2500", (p_units, [], None)),
("Halo_R_Mean500", (p_units, [], None)),
("Halo_R_TopHat200", (p_units, [], None)),
+ ("BH_Mass", (m_units, [], None)),
+ ("Stars/Mass", (m_units, [], None)),
+ ("BH_Mdot", (mdot_units, [], None)),
+ ("StarFormationRate", (mdot_units, [], None)),
)
https://bitbucket.org/yt_analysis/yt/commits/2d9d69de8ff7/
Changeset: 2d9d69de8ff7
Branch: yt-3.0
User: brittonsmith
Date: 2014-05-02 20:50:29
Summary: Renaming Subfind as OWLSSubfind.
Affected #: 11 files
diff -r e8500243bf20f67ba724f42677416d8e45f770e5 -r 2d9d69de8ff79eb5e12c64a296da55e7004c63b2 yt/frontends/halo_catalogs/api.py
--- a/yt/frontends/halo_catalogs/api.py
+++ b/yt/frontends/halo_catalogs/api.py
@@ -24,7 +24,7 @@
IOHandlerRockstarBinary, \
RockstarFieldInfo
-from .subfind.api import \
- SubfindDataset, \
- IOHandlerSubfindHDF5, \
- SubfindFieldInfo
+from .owls_subfind.api import \
+ OWLSSubfindDataset, \
+ IOHandlerOWLSSubfindHDF5, \
+ OWLSSubfindFieldInfo
diff -r e8500243bf20f67ba724f42677416d8e45f770e5 -r 2d9d69de8ff79eb5e12c64a296da55e7004c63b2 yt/frontends/halo_catalogs/owls_subfind/__init__.py
--- /dev/null
+++ b/yt/frontends/halo_catalogs/owls_subfind/__init__.py
@@ -0,0 +1,15 @@
+"""
+API for HaloCatalog frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
diff -r e8500243bf20f67ba724f42677416d8e45f770e5 -r 2d9d69de8ff79eb5e12c64a296da55e7004c63b2 yt/frontends/halo_catalogs/owls_subfind/api.py
--- /dev/null
+++ b/yt/frontends/halo_catalogs/owls_subfind/api.py
@@ -0,0 +1,24 @@
+"""
+API for OWLSSubfind frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+ OWLSSubfindDataset
+
+from .io import \
+ IOHandlerOWLSSubfindHDF5
+
+from .fields import \
+ OWLSSubfindFieldInfo
diff -r e8500243bf20f67ba724f42677416d8e45f770e5 -r 2d9d69de8ff79eb5e12c64a296da55e7004c63b2 yt/frontends/halo_catalogs/owls_subfind/data_structures.py
--- /dev/null
+++ b/yt/frontends/halo_catalogs/owls_subfind/data_structures.py
@@ -0,0 +1,205 @@
+"""
+Data structures for OWLSSubfind frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from collections import defaultdict
+import h5py
+import numpy as np
+import stat
+import weakref
+import struct
+import glob
+import time
+import os
+
+from .fields import \
+ OWLSSubfindFieldInfo
+
+from yt.utilities.cosmology import Cosmology
+from yt.utilities.definitions import \
+ mpc_conversion, sec_conversion
+from yt.utilities.exceptions import \
+ YTException
+from yt.geometry.particle_geometry_handler import \
+ ParticleIndex
+from yt.data_objects.static_output import \
+ Dataset, \
+ ParticleFile
+from yt.frontends.sph.data_structures import \
+ _fix_unit_ordering
+import yt.utilities.fortran_utils as fpu
+from yt.units.yt_array import \
+ YTArray, \
+ YTQuantity
+
+class OWLSSubfindParticleIndex(ParticleIndex):
+ def __init__(self, pf, dataset_type):
+ super(OWLSSubfindParticleIndex, self).__init__(pf, dataset_type)
+
+ def _calculate_particle_index_starts(self):
+ # Halo indices are not saved in the file, so we must count by hand.
+ # File 0 has halos 0 to N_0 - 1, file 1 has halos N_0 to N_0 + N_1 - 1, etc.
+ particle_count = defaultdict(int)
+ offset_count = 0
+ for data_file in self.data_files:
+ data_file.index_start = dict([(ptype, particle_count[ptype]) for
+ ptype in data_file.total_particles])
+ data_file.offset_start = offset_count
+ for ptype in data_file.total_particles:
+ particle_count[ptype] += data_file.total_particles[ptype]
+ offset_count += data_file.total_offset
+
+ def _calculate_file_offset_map(self):
+ # After the FOF is performed, a load-balancing step redistributes halos
+ # and then writes more fields. Here, for each file, we create a list of
+ # files which contain the rest of the redistributed particles.
+ ifof = np.array([data_file.total_particles["FOF"]
+ for data_file in self.data_files])
+ isub = np.array([data_file.total_offset
+ for data_file in self.data_files])
+ subend = isub.cumsum()
+ fofend = ifof.cumsum()
+ istart = np.digitize(fofend - ifof, subend - isub) - 1
+ iend = np.clip(np.digitize(fofend, subend), 0, ifof.size - 2)
+ for i, data_file in enumerate(self.data_files):
+ data_file.offset_files = self.data_files[istart[i]: iend[i] + 1]
+
+ def _setup_geometry(self):
+ super(OWLSSubfindParticleIndex, self)._setup_geometry()
+ self._calculate_particle_index_starts()
+ self._calculate_file_offset_map()
+
+class OWLSSubfindHDF5File(ParticleFile):
+ def __init__(self, pf, io, filename, file_id):
+ super(OWLSSubfindHDF5File, self).__init__(pf, io, filename, file_id)
+ with h5py.File(filename, "r") as f:
+ self.header = dict((field, f.attrs[field]) \
+ for field in f.attrs.keys())
+
+class OWLSSubfindDataset(Dataset):
+ _index_class = OWLSSubfindParticleIndex
+ _file_class = OWLSSubfindHDF5File
+ _field_info_class = OWLSSubfindFieldInfo
+ _suffix = ".hdf5"
+
+ def __init__(self, filename, dataset_type="subfind_hdf5",
+ n_ref = 16, over_refine_factor = 1):
+ self.n_ref = n_ref
+ self.over_refine_factor = over_refine_factor
+ super(OWLSSubfindDataset, self).__init__(filename, dataset_type)
+
+ def _parse_parameter_file(self):
+ handle = h5py.File(self.parameter_filename, mode="r")
+ hvals = {}
+ hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
+ hvals["NumFiles"] = hvals["NumFilesPerSnapshot"]
+ hvals["Massarr"] = hvals["MassTable"]
+
+ self.dimensionality = 3
+ self.refine_by = 2
+ self.unique_identifier = \
+ int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+
+ # Set standard values
+ self.current_time = self.quan(hvals["Time_GYR"] * sec_conversion["Gyr"], "s")
+ self.domain_left_edge = np.zeros(3, "float64")
+ self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
+ nz = 1 << self.over_refine_factor
+ self.domain_dimensions = np.ones(3, "int32") * nz
+ self.cosmological_simulation = 1
+ self.periodicity = (True, True, True)
+ self.current_redshift = hvals["Redshift"]
+ self.omega_lambda = hvals["OmegaLambda"]
+ self.omega_matter = hvals["Omega0"]
+ self.hubble_constant = hvals["HubbleParam"]
+ self.parameters = hvals
+ prefix = os.path.abspath(
+ os.path.join(os.path.dirname(self.parameter_filename),
+ os.path.basename(self.parameter_filename).split(".", 1)[0]))
+
+ suffix = self.parameter_filename.rsplit(".", 1)[-1]
+ self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix)
+ self.file_count = len(glob.glob(prefix + "*" + self._suffix))
+ if self.file_count == 0:
+ raise YTException(message="No data files found.", pf=self)
+ self.particle_types = ("FOF", "SUBFIND")
+ self.particle_types_raw = ("FOF", "SUBFIND")
+
+ # To avoid having to open files twice
+ self._unit_base = {}
+ self._unit_base.update(
+ (str(k), v) for k, v in handle["/Units"].attrs.items())
+ # Comoving cm is given in the Units
+ self._unit_base['cmcm'] = 1.0 / self._unit_base["UnitLength_in_cm"]
+ handle.close()
+
+ def _set_code_unit_attributes(self):
+ # Set a sane default for cosmological simulations.
+ if self._unit_base is None and self.cosmological_simulation == 1:
+ mylog.info("Assuming length units are in Mpc/h (comoving)")
+ self._unit_base = dict(length = (1.0, "Mpccm/h"))
+ # Any remaining values fall back to the standard Gadget
+ # defaults.
+ unit_base = self._unit_base or {}
+ if "length" in unit_base:
+ length_unit = unit_base["length"]
+ elif "UnitLength_in_cm" in unit_base:
+ if self.cosmological_simulation == 0:
+ length_unit = (unit_base["UnitLength_in_cm"], "cm")
+ else:
+ length_unit = (unit_base["UnitLength_in_cm"], "cmcm/h")
+ else:
+ raise RuntimeError
+ length_unit = _fix_unit_ordering(length_unit)
+ self.length_unit = self.quan(length_unit[0], length_unit[1])
+
+ unit_base = self._unit_base or {}
+ if "velocity" in unit_base:
+ velocity_unit = unit_base["velocity"]
+ elif "UnitVelocity_in_cm_per_s" in unit_base:
+ velocity_unit = (unit_base["UnitVelocity_in_cm_per_s"], "cm/s")
+ else:
+ velocity_unit = (1e5, "cm/s")
+ velocity_unit = _fix_unit_ordering(velocity_unit)
+ self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])
+ # We set hubble_constant = 1.0 for non-cosmology, so this is safe.
+ # Default to 1e10 Msun/h if mass is not specified.
+ if "mass" in unit_base:
+ mass_unit = unit_base["mass"]
+ elif "UnitMass_in_g" in unit_base:
+ if self.cosmological_simulation == 0:
+ mass_unit = (unit_base["UnitMass_in_g"], "g")
+ else:
+ mass_unit = (unit_base["UnitMass_in_g"], "g/h")
+ else:
+ # Sane default
+ mass_unit = (1.0, "1e10*Msun/h")
+ mass_unit = _fix_unit_ordering(mass_unit)
+ self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
+ self.time_unit = self.quan(unit_base["UnitTime_in_s"], "s")
+
+ @classmethod
+ def _is_valid(self, *args, **kwargs):
+ try:
+ fileh = h5py.File(args[0], mode='r')
+ if "Constants" in fileh["/"].keys() and \
+ "Header" in fileh["/"].keys() and \
+ "SUBFIND" in fileh["/"].keys():
+ fileh.close()
+ return True
+ fileh.close()
+ except:
+ pass
+ return False
diff -r e8500243bf20f67ba724f42677416d8e45f770e5 -r 2d9d69de8ff79eb5e12c64a296da55e7004c63b2 yt/frontends/halo_catalogs/owls_subfind/fields.py
--- /dev/null
+++ b/yt/frontends/halo_catalogs/owls_subfind/fields.py
@@ -0,0 +1,58 @@
+"""
+OWLSSubfind-specific fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.funcs import mylog
+from yt.fields.field_info_container import \
+ FieldInfoContainer
+from yt.units.yt_array import \
+ YTArray
+
+m_units = "code_mass"
+mdot_units = "code_mass / code_time"
+p_units = "Mpccm/h"
+v_units = "1e5 * cmcm / s"
+
+class OWLSSubfindFieldInfo(FieldInfoContainer):
+ known_other_fields = (
+ )
+
+ known_particle_fields = (
+ ("CenterOfMass_0", (p_units, ["particle_position_x"], None)),
+ ("CenterOfMass_1", (p_units, ["particle_position_y"], None)),
+ ("CenterOfMass_2", (p_units, ["particle_position_z"], None)),
+ ("CenterOfMassVelocity_0", (v_units, ["particle_velocity_x"], None)),
+ ("CenterOfMassVelocity_1", (v_units, ["particle_velocity_y"], None)),
+ ("CenterOfMassVelocity_2", (v_units, ["particle_velocity_z"], None)),
+ ("Mass", (m_units, ["particle_mass"], None)),
+ ("Halo_M_Crit200", (m_units, ["Virial Mass"], None)),
+ ("Halo_M_Crit2500", (m_units, [], None)),
+ ("Halo_M_Crit500", (m_units, [], None)),
+ ("Halo_M_Mean200", (m_units, [], None)),
+ ("Halo_M_Mean2500", (m_units, [], None)),
+ ("Halo_M_Mean500", (m_units, [], None)),
+ ("Halo_M_TopHat200", (m_units, [], None)),
+ ("Halo_R_Crit200", (p_units, ["Virial Radius"], None)),
+ ("Halo_R_Crit2500", (p_units, [], None)),
+ ("Halo_R_Crit500", (p_units, [], None)),
+ ("Halo_R_Mean200", (p_units, [], None)),
+ ("Halo_R_Mean2500", (p_units, [], None)),
+ ("Halo_R_Mean500", (p_units, [], None)),
+ ("Halo_R_TopHat200", (p_units, [], None)),
+ ("BH_Mass", (m_units, [], None)),
+ ("Stars/Mass", (m_units, [], None)),
+ ("BH_Mdot", (mdot_units, [], None)),
+ ("StarFormationRate", (mdot_units, [], None)),
+)
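
Each known_particle_fields entry above follows the FieldInfoContainer
convention of (on-disk name, (units, aliases, display_name)); a small
sketch of how one entry unpacks:

    # Illustrative only; FieldInfoContainer consumes these tuples itself.
    entry = ("Halo_M_Crit200", ("code_mass", ["Virial Mass"], None))
    name, (units, aliases, display_name) = entry
    assert name == "Halo_M_Crit200"      # HDF5 dataset name on disk
    assert units == "code_mass"          # units assigned to the raw data
    assert aliases == ["Virial Mass"]    # extra names the field answers to
    assert display_name is None          # None: derive the plot label from name
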
diff -r e8500243bf20f67ba724f42677416d8e45f770e5 -r 2d9d69de8ff79eb5e12c64a296da55e7004c63b2 yt/frontends/halo_catalogs/owls_subfind/io.py
--- /dev/null
+++ b/yt/frontends/halo_catalogs/owls_subfind/io.py
@@ -0,0 +1,209 @@
+"""
+OWLSSubfind data-file handling function
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+
+from yt.utilities.exceptions import *
+from yt.funcs import mylog
+
+from yt.utilities.io_handler import \
+ BaseIOHandler
+
+from yt.utilities.lib.geometry_utils import compute_morton
+
+from yt.geometry.oct_container import _ORDER_MAX
+
+class IOHandlerOWLSSubfindHDF5(BaseIOHandler):
+ _dataset_type = "subfind_hdf5"
+
+ def __init__(self, pf):
+ super(IOHandlerOWLSSubfindHDF5, self).__init__(pf)
+ self.offset_fields = set([])
+
+ def _read_fluid_selection(self, chunks, selector, fields, size):
+ raise NotImplementedError
+
+ def _read_particle_coords(self, chunks, ptf):
+ # This will read chunks and yield the results.
+ chunks = list(chunks)
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ with h5py.File(data_file.filename, "r") as f:
+ for ptype, field_list in sorted(ptf.items()):
+ pcount = data_file.total_particles[ptype]
+ coords = f[ptype]["CenterOfMass"].value.astype("float64")
+ coords = np.resize(coords, (pcount, 3))
+ x = coords[:, 0]
+ y = coords[:, 1]
+ z = coords[:, 2]
+ yield ptype, (x, y, z)
+
+ def _read_offset_particle_field(self, field, data_file, fh):
+ field_data = np.empty(data_file.total_particles["FOF"], dtype="float64")
+ fofindex = np.arange(data_file.total_particles["FOF"]) + data_file.index_start["FOF"]
+ for offset_file in data_file.offset_files:
+ if fh.filename == offset_file.filename:
+ ofh = fh
+ else:
+ ofh = h5py.File(offset_file.filename, "r")
+ subindex = np.arange(offset_file.total_offset) + offset_file.offset_start
+ substart = max(fofindex[0] - subindex[0], 0)
+ subend = min(fofindex[-1] - subindex[0], subindex.size - 1)
+ fofstart = substart + subindex[0] - fofindex[0]
+ fofend = subend + subindex[0] - fofindex[0]
+ field_data[fofstart:fofend + 1] = ofh["SUBFIND"][field][substart:subend + 1]
+ return field_data
+
+ def _read_particle_fields(self, chunks, ptf, selector):
+ # Now we have all the sizes, and we can allocate
+ chunks = list(chunks)
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ with h5py.File(data_file.filename, "r") as f:
+ for ptype, field_list in sorted(ptf.items()):
+ pcount = data_file.total_particles[ptype]
+ coords = f[ptype]["CenterOfMass"].value.astype("float64")
+ coords = np.resize(coords, (pcount, 3))
+ x = coords[:, 0]
+ y = coords[:, 1]
+ z = coords[:, 2]
+ mask = selector.select_points(x, y, z)
+ del x, y, z
+ if mask is None: continue
+ for field in field_list:
+ if field in self.offset_fields:
+ field_data = \
+ self._read_offset_particle_field(field, data_file, f)
+ else:
+ if field == "particle_identifier":
+ field_data = \
+ np.arange(data_file.total_particles[ptype]) + \
+ data_file.index_start[ptype]
+ elif field in f[ptype]:
+ field_data = f[ptype][field].value.astype("float64")
+ else:
+ fname = field[:field.rfind("_")]
+ field_data = f[ptype][fname].value.astype("float64")
+ my_div = field_data.size / pcount
+ if my_div > 1:
+ field_data = np.resize(field_data, (pcount, my_div))
+ findex = int(field[field.rfind("_") + 1:])
+ field_data = field_data[:, findex]
+ data = field_data[mask]
+ yield (ptype, field), data
+
+ def _initialize_index(self, data_file, regions):
+ pcount = sum(self._count_particles(data_file).values())
+ morton = np.empty(pcount, dtype='uint64')
+ mylog.debug("Initializing index % 5i (% 7i particles)",
+ data_file.file_id, pcount)
+ ind = 0
+ with h5py.File(data_file.filename, "r") as f:
+ if not f.keys(): return None
+ dx = np.finfo(f["FOF"]['CenterOfMass'].dtype).eps
+ dx = 2.0*self.pf.quan(dx, "code_length")
+
+ for ptype, pattr in zip(["FOF", "SUBFIND"],
+ ["Number_of_groups", "Number_of_subgroups"]):
+ my_pcount = f[ptype].attrs[pattr]
+ pos = f[ptype]["CenterOfMass"].value.astype("float64")
+ pos = np.resize(pos, (my_pcount, 3))
+ pos = data_file.pf.arr(pos, "code_length")
+
+ # These are 32 bit numbers, so we give a little leeway.
+ # Otherwise, for big sets of particles, we will often bump into the
+ # domain edges. This helps alleviate that.
+ np.clip(pos, self.pf.domain_left_edge + dx,
+ self.pf.domain_right_edge - dx, pos)
+ if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
+ np.any(pos.max(axis=0) > self.pf.domain_right_edge):
+ raise YTDomainOverflow(pos.min(axis=0),
+ pos.max(axis=0),
+ self.pf.domain_left_edge,
+ self.pf.domain_right_edge)
+ regions.add_data_file(pos, data_file.file_id)
+ morton[ind:ind+pos.shape[0]] = compute_morton(
+ pos[:,0], pos[:,1], pos[:,2],
+ data_file.pf.domain_left_edge,
+ data_file.pf.domain_right_edge)
+ ind += pos.shape[0]
+ return morton
+
+ def _count_particles(self, data_file):
+ with h5py.File(data_file.filename, "r") as f:
+ # We need this to figure out where the offset fields are stored.
+ data_file.total_offset = f["SUBFIND"].attrs["Number_of_groups"]
+ return {"FOF": f["FOF"].attrs["Number_of_groups"],
+ "SUBFIND": f["FOF"].attrs["Number_of_subgroups"]}
+
+ def _identify_fields(self, data_file):
+ fields = [(ptype, "particle_identifier")
+ for ptype in self.pf.particle_types_raw]
+ pcount = data_file.total_particles
+ with h5py.File(data_file.filename, "r") as f:
+ for ptype in self.pf.particle_types_raw:
+ my_fields, my_offset_fields = \
+ subfind_field_list(f[ptype], ptype, data_file.total_particles)
+ fields.extend(my_fields)
+ self.offset_fields = self.offset_fields.union(set(my_offset_fields))
+ return fields, {}
+
+def subfind_field_list(fh, ptype, pcount):
+ fields = []
+ offset_fields = []
+ for field in fh.keys():
+ if "PartType" in field:
+ # These are halo member particles
+ continue
+ elif isinstance(fh[field], h5py.Group):
+ my_fields, my_offset_fields = \
+ subfind_field_list(fh[field], ptype, pcount)
+ fields.extend(my_fields)
+ offset_fields.extend(my_offset_fields)
+ else:
+ if not fh[field].size % pcount[ptype]:
+ my_div = fh[field].size / pcount[ptype]
+ fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
+ if my_div > 1:
+ for i in range(my_div):
+ fields.append((ptype, "%s_%d" % (fname, i)))
+ else:
+ fields.append((ptype, fname))
+ elif ptype == "SUBFIND" and \
+ not fh[field].size % fh["/SUBFIND"].attrs["Number_of_groups"]:
+ # These are actually FOF fields, but they were written after
+ # a load balancing step moved halos around and thus they do not
+ # correspond to the halos stored in the FOF group.
+ my_div = fh[field].size / fh["/SUBFIND"].attrs["Number_of_groups"]
+ fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
+ if my_div > 1:
+ for i in range(my_div):
+ fields.append(("FOF", "%s_%d" % (fname, i)))
+ else:
+ fields.append(("FOF", fname))
+ offset_fields.append(fname)
+ else:
+ mylog.warn("Cannot add field (%s, %s) with size %d." % \
+ (ptype, fh[field].name, fh[field].size))
+ continue
+ return fields, offset_fields
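
The component-splitting rule shared by subfind_field_list and
_read_particle_fields above can be sketched standalone: a dataset whose size
is a multiple my_div > 1 of the particle count is reshaped to
(pcount, my_div) and served as numbered component fields.

    import numpy as np

    # Standalone sketch with made-up data: a flattened (pcount, 3)
    # CenterOfMass array becomes CenterOfMass_0 .. CenterOfMass_2.
    pcount = 4
    raw = np.arange(3 * pcount, dtype="float64")
    my_div = raw.size // pcount               # 3 components
    data = np.resize(raw, (pcount, my_div))
    for i in range(my_div):
        print("CenterOfMass_%d" % i, data[:, i])
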
diff -r e8500243bf20f67ba724f42677416d8e45f770e5 -r 2d9d69de8ff79eb5e12c64a296da55e7004c63b2 yt/frontends/halo_catalogs/subfind/__init__.py
--- a/yt/frontends/halo_catalogs/subfind/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""
-API for HaloCatalog frontend.
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
diff -r e8500243bf20f67ba724f42677416d8e45f770e5 -r 2d9d69de8ff79eb5e12c64a296da55e7004c63b2 yt/frontends/halo_catalogs/subfind/api.py
--- a/yt/frontends/halo_catalogs/subfind/api.py
+++ /dev/null
@@ -1,24 +0,0 @@
-"""
-API for Subfind frontend
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2014, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
- SubfindDataset
-
-from .io import \
- IOHandlerSubfindHDF5
-
-from .fields import \
- SubfindFieldInfo
diff -r e8500243bf20f67ba724f42677416d8e45f770e5 -r 2d9d69de8ff79eb5e12c64a296da55e7004c63b2 yt/frontends/halo_catalogs/subfind/data_structures.py
--- a/yt/frontends/halo_catalogs/subfind/data_structures.py
+++ /dev/null
@@ -1,205 +0,0 @@
-"""
-Data structures for Subfind frontend.
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from collections import defaultdict
-import h5py
-import numpy as np
-import stat
-import weakref
-import struct
-import glob
-import time
-import os
-
-from .fields import \
- SubfindFieldInfo
-
-from yt.utilities.cosmology import Cosmology
-from yt.utilities.definitions import \
- mpc_conversion, sec_conversion
-from yt.utilities.exceptions import \
- YTException
-from yt.geometry.particle_geometry_handler import \
- ParticleIndex
-from yt.data_objects.static_output import \
- Dataset, \
- ParticleFile
-from yt.frontends.sph.data_structures import \
- _fix_unit_ordering
-import yt.utilities.fortran_utils as fpu
-from yt.units.yt_array import \
- YTArray, \
- YTQuantity
-
-class SubfindParticleIndex(ParticleIndex):
- def __init__(self, pf, dataset_type):
- super(SubfindParticleIndex, self).__init__(pf, dataset_type)
-
- def _calculate_particle_index_starts(self):
- # Halo indices are not saved in the file, so we must count by hand.
- # File 0 has halos 0 to N_0 - 1, file 1 has halos N_0 to N_0 + N_1 - 1, etc.
- particle_count = defaultdict(int)
- offset_count = 0
- for data_file in self.data_files:
- data_file.index_start = dict([(ptype, particle_count[ptype]) for
- ptype in data_file.total_particles])
- data_file.offset_start = offset_count
- for ptype in data_file.total_particles:
- particle_count[ptype] += data_file.total_particles[ptype]
- offset_count += data_file.total_offset
-
- def _calculate_file_offset_map(self):
- # After the FOF is performed, a load-balancing step redistributes halos
- # and then writes more fields. Here, for each file, we create a list of
- # files which contain the rest of the redistributed particles.
- ifof = np.array([data_file.total_particles["FOF"]
- for data_file in self.data_files])
- isub = np.array([data_file.total_offset
- for data_file in self.data_files])
- subend = isub.cumsum()
- fofend = ifof.cumsum()
- istart = np.digitize(fofend - ifof, subend - isub) - 1
- iend = np.clip(np.digitize(fofend, subend), 0, ifof.size - 2)
- for i, data_file in enumerate(self.data_files):
- data_file.offset_files = self.data_files[istart[i]: iend[i] + 1]
-
- def _setup_geometry(self):
- super(SubfindParticleIndex, self)._setup_geometry()
- self._calculate_particle_index_starts()
- self._calculate_file_offset_map()
-
-class SubfindHDF5File(ParticleFile):
- def __init__(self, pf, io, filename, file_id):
- super(SubfindHDF5File, self).__init__(pf, io, filename, file_id)
- with h5py.File(filename, "r") as f:
- self.header = dict((field, f.attrs[field]) \
- for field in f.attrs.keys())
-
-class SubfindDataset(Dataset):
- _index_class = SubfindParticleIndex
- _file_class = SubfindHDF5File
- _field_info_class = SubfindFieldInfo
- _suffix = ".hdf5"
-
- def __init__(self, filename, dataset_type="subfind_hdf5",
- n_ref = 16, over_refine_factor = 1):
- self.n_ref = n_ref
- self.over_refine_factor = over_refine_factor
- super(SubfindDataset, self).__init__(filename, dataset_type)
-
- def _parse_parameter_file(self):
- handle = h5py.File(self.parameter_filename, mode="r")
- hvals = {}
- hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
- hvals["NumFiles"] = hvals["NumFilesPerSnapshot"]
- hvals["Massarr"] = hvals["MassTable"]
-
- self.dimensionality = 3
- self.refine_by = 2
- self.unique_identifier = \
- int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-
- # Set standard values
- self.current_time = self.quan(hvals["Time_GYR"] * sec_conversion["Gyr"], "s")
- self.domain_left_edge = np.zeros(3, "float64")
- self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
- nz = 1 << self.over_refine_factor
- self.domain_dimensions = np.ones(3, "int32") * nz
- self.cosmological_simulation = 1
- self.periodicity = (True, True, True)
- self.current_redshift = hvals["Redshift"]
- self.omega_lambda = hvals["OmegaLambda"]
- self.omega_matter = hvals["Omega0"]
- self.hubble_constant = hvals["HubbleParam"]
- self.parameters = hvals
- prefix = os.path.abspath(
- os.path.join(os.path.dirname(self.parameter_filename),
- os.path.basename(self.parameter_filename).split(".", 1)[0]))
-
- suffix = self.parameter_filename.rsplit(".", 1)[-1]
- self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix)
- self.file_count = len(glob.glob(prefix + "*" + self._suffix))
- if self.file_count == 0:
- raise YTException(message="No data files found.", pf=self)
- self.particle_types = ("FOF", "SUBFIND")
- self.particle_types_raw = ("FOF", "SUBFIND")
-
- # To avoid having to open files twice
- self._unit_base = {}
- self._unit_base.update(
- (str(k), v) for k, v in handle["/Units"].attrs.items())
- # Comoving cm is given in the Units
- self._unit_base['cmcm'] = 1.0 / self._unit_base["UnitLength_in_cm"]
- handle.close()
-
- def _set_code_unit_attributes(self):
- # Set a sane default for cosmological simulations.
- if self._unit_base is None and self.cosmological_simulation == 1:
- mylog.info("Assuming length units are in Mpc/h (comoving)")
- self._unit_base = dict(length = (1.0, "Mpccm/h"))
- # The other same defaults we will use from the standard Gadget
- # defaults.
- unit_base = self._unit_base or {}
- if "length" in unit_base:
- length_unit = unit_base["length"]
- elif "UnitLength_in_cm" in unit_base:
- if self.cosmological_simulation == 0:
- length_unit = (unit_base["UnitLength_in_cm"], "cm")
- else:
- length_unit = (unit_base["UnitLength_in_cm"], "cmcm/h")
- else:
- raise RuntimeError
- length_unit = _fix_unit_ordering(length_unit)
- self.length_unit = self.quan(length_unit[0], length_unit[1])
-
- unit_base = self._unit_base or {}
- if "velocity" in unit_base:
- velocity_unit = unit_base["velocity"]
- elif "UnitVelocity_in_cm_per_s" in unit_base:
- velocity_unit = (unit_base["UnitVelocity_in_cm_per_s"], "cm/s")
- else:
- velocity_unit = (1e5, "cm/s")
- velocity_unit = _fix_unit_ordering(velocity_unit)
- self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])
- # We set hubble_constant = 1.0 for non-cosmology, so this is safe.
- # Default to 1e10 Msun/h if mass is not specified.
- if "mass" in unit_base:
- mass_unit = unit_base["mass"]
- elif "UnitMass_in_g" in unit_base:
- if self.cosmological_simulation == 0:
- mass_unit = (unit_base["UnitMass_in_g"], "g")
- else:
- mass_unit = (unit_base["UnitMass_in_g"], "g/h")
- else:
- # Sane default
- mass_unit = (1.0, "1e10*Msun/h")
- mass_unit = _fix_unit_ordering(mass_unit)
- self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
- self.time_unit = self.quan(unit_base["UnitTime_in_s"], "s")
-
- @classmethod
- def _is_valid(self, *args, **kwargs):
- try:
- fileh = h5py.File(args[0], mode='r')
- if "Constants" in fileh["/"].keys() and \
- "Header" in fileh["/"].keys() and \
- "SUBFIND" in fileh["/"].keys():
- fileh.close()
- return True
- fileh.close()
- except:
- pass
- return False
diff -r e8500243bf20f67ba724f42677416d8e45f770e5 -r 2d9d69de8ff79eb5e12c64a296da55e7004c63b2 yt/frontends/halo_catalogs/subfind/fields.py
--- a/yt/frontends/halo_catalogs/subfind/fields.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""
-Subfind-specific fields
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.funcs import mylog
-from yt.fields.field_info_container import \
- FieldInfoContainer
-from yt.units.yt_array import \
- YTArray
-
-m_units = "code_mass"
-mdot_units = "code_mass / code_time"
-p_units = "Mpccm/h"
-v_units = "1e5 * cmcm / s"
-
-class SubfindFieldInfo(FieldInfoContainer):
- known_other_fields = (
- )
-
- known_particle_fields = (
- ("CenterOfMass_0", (p_units, ["particle_position_x"], None)),
- ("CenterOfMass_1", (p_units, ["particle_position_y"], None)),
- ("CenterOfMass_2", (p_units, ["particle_position_z"], None)),
- ("CenterOfMassVelocity_0", (v_units, ["particle_velocity_x"], None)),
- ("CenterOfMassVelocity_1", (v_units, ["particle_velocity_y"], None)),
- ("CenterOfMassVelocity_2", (v_units, ["particle_velocity_z"], None)),
- ("Mass", (m_units, ["particle_mass"], None)),
- ("Halo_M_Crit200", (m_units, ["Virial Mass"], None)),
- ("Halo_M_Crit2500", (m_units, [], None)),
- ("Halo_M_Crit500", (m_units, [], None)),
- ("Halo_M_Mean200", (m_units, [], None)),
- ("Halo_M_Mean2500", (m_units, [], None)),
- ("Halo_M_Mean500", (m_units, [], None)),
- ("Halo_M_TopHat200", (m_units, [], None)),
- ("Halo_R_Crit200", (p_units, ["Virial Radius"], None)),
- ("Halo_R_Crit2500", (p_units, [], None)),
- ("Halo_R_Crit500", (p_units, [], None)),
- ("Halo_R_Mean200", (p_units, [], None)),
- ("Halo_R_Mean2500", (p_units, [], None)),
- ("Halo_R_Mean500", (p_units, [], None)),
- ("Halo_R_TopHat200", (p_units, [], None)),
- ("BH_Mass", (m_units, [], None)),
- ("Stars/Mass", (m_units, [], None)),
- ("BH_Mdot", (mdot_units, [], None)),
- ("StarFormationRate", (mdot_units, [], None)),
-)
diff -r e8500243bf20f67ba724f42677416d8e45f770e5 -r 2d9d69de8ff79eb5e12c64a296da55e7004c63b2 yt/frontends/halo_catalogs/subfind/io.py
--- a/yt/frontends/halo_catalogs/subfind/io.py
+++ /dev/null
@@ -1,209 +0,0 @@
-"""
-Subfind data-file handling function
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import numpy as np
-
-from yt.utilities.exceptions import *
-from yt.funcs import mylog
-
-from yt.utilities.io_handler import \
- BaseIOHandler
-
-from yt.utilities.lib.geometry_utils import compute_morton
-
-from yt.geometry.oct_container import _ORDER_MAX
-
-class IOHandlerSubfindHDF5(BaseIOHandler):
- _dataset_type = "subfind_hdf5"
-
- def __init__(self, pf):
- super(IOHandlerSubfindHDF5, self).__init__(pf)
- self.offset_fields = set([])
-
- def _read_fluid_selection(self, chunks, selector, fields, size):
- raise NotImplementedError
-
- def _read_particle_coords(self, chunks, ptf):
- # This will read chunks and yield the results.
- chunks = list(chunks)
- data_files = set([])
- for chunk in chunks:
- for obj in chunk.objs:
- data_files.update(obj.data_files)
- for data_file in data_files:
- with h5py.File(data_file.filename, "r") as f:
- for ptype, field_list in sorted(ptf.items()):
- pcount = data_file.total_particles[ptype]
- coords = f[ptype]["CenterOfMass"].value.astype("float64")
- coords = np.resize(coords, (pcount, 3))
- x = coords[:, 0]
- y = coords[:, 1]
- z = coords[:, 2]
- yield ptype, (x, y, z)
-
- def _read_offset_particle_field(self, field, data_file, fh):
- field_data = np.empty(data_file.total_particles["FOF"], dtype="float64")
- fofindex = np.arange(data_file.total_particles["FOF"]) + data_file.index_start["FOF"]
- for offset_file in data_file.offset_files:
- if fh.filename == offset_file.filename:
- ofh = fh
- else:
- ofh = h5py.File(offset_file.filename, "r")
- subindex = np.arange(offset_file.total_offset) + offset_file.offset_start
- substart = max(fofindex[0] - subindex[0], 0)
- subend = min(fofindex[-1] - subindex[0], subindex.size - 1)
- fofstart = substart + subindex[0] - fofindex[0]
- fofend = subend + subindex[0] - fofindex[0]
- field_data[fofstart:fofend + 1] = ofh["SUBFIND"][field][substart:subend + 1]
- return field_data
-
- def _read_particle_fields(self, chunks, ptf, selector):
- # Now we have all the sizes, and we can allocate
- chunks = list(chunks)
- data_files = set([])
- for chunk in chunks:
- for obj in chunk.objs:
- data_files.update(obj.data_files)
- for data_file in data_files:
- with h5py.File(data_file.filename, "r") as f:
- for ptype, field_list in sorted(ptf.items()):
- pcount = data_file.total_particles[ptype]
- coords = f[ptype]["CenterOfMass"].value.astype("float64")
- coords = np.resize(coords, (pcount, 3))
- x = coords[:, 0]
- y = coords[:, 1]
- z = coords[:, 2]
- mask = selector.select_points(x, y, z)
- del x, y, z
- if mask is None: continue
- for field in field_list:
- if field in self.offset_fields:
- field_data = \
- self._read_offset_particle_field(field, data_file, f)
- else:
- if field == "particle_identifier":
- field_data = \
- np.arange(data_file.total_particles[ptype]) + \
- data_file.index_start[ptype]
- elif field in f[ptype]:
- field_data = f[ptype][field].value.astype("float64")
- else:
- fname = field[:field.rfind("_")]
- field_data = f[ptype][fname].value.astype("float64")
- my_div = field_data.size / pcount
- if my_div > 1:
- field_data = np.resize(field_data, (pcount, my_div))
- findex = int(field[field.rfind("_") + 1:])
- field_data = field_data[:, findex]
- data = field_data[mask]
- yield (ptype, field), data
-
- def _initialize_index(self, data_file, regions):
- pcount = sum(self._count_particles(data_file).values())
- morton = np.empty(pcount, dtype='uint64')
- mylog.debug("Initializing index % 5i (% 7i particles)",
- data_file.file_id, pcount)
- ind = 0
- with h5py.File(data_file.filename, "r") as f:
- if not f.keys(): return None
- dx = np.finfo(f["FOF"]['CenterOfMass'].dtype).eps
- dx = 2.0*self.pf.quan(dx, "code_length")
-
- for ptype, pattr in zip(["FOF", "SUBFIND"],
- ["Number_of_groups", "Number_of_subgroups"]):
- my_pcount = f[ptype].attrs[pattr]
- pos = f[ptype]["CenterOfMass"].value.astype("float64")
- pos = np.resize(pos, (my_pcount, 3))
- pos = data_file.pf.arr(pos, "code_length")
-
- # These are 32 bit numbers, so we give a little lee-way.
- # Otherwise, for big sets of particles, we often will bump into the
- # domain edges. This helps alleviate that.
- np.clip(pos, self.pf.domain_left_edge + dx,
- self.pf.domain_right_edge - dx, pos)
- if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
- np.any(pos.max(axis=0) > self.pf.domain_right_edge):
- raise YTDomainOverflow(pos.min(axis=0),
- pos.max(axis=0),
- self.pf.domain_left_edge,
- self.pf.domain_right_edge)
- regions.add_data_file(pos, data_file.file_id)
- morton[ind:ind+pos.shape[0]] = compute_morton(
- pos[:,0], pos[:,1], pos[:,2],
- data_file.pf.domain_left_edge,
- data_file.pf.domain_right_edge)
- ind += pos.shape[0]
- return morton
-
- def _count_particles(self, data_file):
- with h5py.File(data_file.filename, "r") as f:
- # We need this to figure out where the offset fields are stored.
- data_file.total_offset = f["SUBFIND"].attrs["Number_of_groups"]
- return {"FOF": f["FOF"].attrs["Number_of_groups"],
- "SUBFIND": f["FOF"].attrs["Number_of_subgroups"]}
-
- def _identify_fields(self, data_file):
- fields = [(ptype, "particle_identifier")
- for ptype in self.pf.particle_types_raw]
- pcount = data_file.total_particles
- with h5py.File(data_file.filename, "r") as f:
- for ptype in self.pf.particle_types_raw:
- my_fields, my_offset_fields = \
- subfind_field_list(f[ptype], ptype, data_file.total_particles)
- fields.extend(my_fields)
- self.offset_fields = self.offset_fields.union(set(my_offset_fields))
- return fields, {}
-
-def subfind_field_list(fh, ptype, pcount):
- fields = []
- offset_fields = []
- for field in fh.keys():
- if "PartType" in field:
- # These are halo member particles
- continue
- elif isinstance(fh[field], h5py.Group):
- my_fields, my_offset_fields = \
- subfind_field_list(fh[field], ptype, pcount)
- fields.extend(my_fields)
- my_offset_fields.extend(offset_fields)
- else:
- if not fh[field].size % pcount[ptype]:
- my_div = fh[field].size / pcount[ptype]
- fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
- if my_div > 1:
- for i in range(my_div):
- fields.append((ptype, "%s_%d" % (fname, i)))
- else:
- fields.append((ptype, fname))
- elif ptype == "SUBFIND" and \
- not fh[field].size % fh["/SUBFIND"].attrs["Number_of_groups"]:
- # These are actually FOF fields, but they were written after
- # a load balancing step moved halos around and thus they do not
- # correspond to the halos stored in the FOF group.
- my_div = fh[field].size / fh["/SUBFIND"].attrs["Number_of_groups"]
- fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
- if my_div > 1:
- for i in range(my_div):
- fields.append(("FOF", "%s_%d" % (fname, i)))
- else:
- fields.append(("FOF", fname))
- offset_fields.append(fname)
- else:
- mylog.warn("Cannot add field (%s, %s) with size %d." % \
- (ptype, fh[field].name, fh[field].size))
- continue
- return fields, offset_fields
https://bitbucket.org/yt_analysis/yt/commits/fe9929e027bc/
Changeset: fe9929e027bc
Branch: yt-3.0
User: brittonsmith
Date: 2014-05-03 06:31:22
Summary: Restoring the field check over all data files in the superclass and adding a subclass method so this frontend checks only the first file.
Affected #: 2 files
diff -r 2d9d69de8ff79eb5e12c64a296da55e7004c63b2 -r fe9929e027bc53551d829befe29f7046be90ca83 yt/frontends/halo_catalogs/owls_subfind/data_structures.py
--- a/yt/frontends/halo_catalogs/owls_subfind/data_structures.py
+++ b/yt/frontends/halo_catalogs/owls_subfind/data_structures.py
@@ -75,7 +75,25 @@
iend = np.clip(np.digitize(fofend, subend), 0, ifof.size - 2)
for i, data_file in enumerate(self.data_files):
data_file.offset_files = self.data_files[istart[i]: iend[i] + 1]
-
+
+ def _detect_output_fields(self):
+ # TODO: Add additional fields
+ pfl = []
+ units = {}
+ for dom in self.data_files[:1]:
+ fl, _units = self.io._identify_fields(dom)
+ units.update(_units)
+ dom._calculate_offsets(fl)
+ for f in fl:
+ if f not in pfl: pfl.append(f)
+ self.field_list = pfl
+ pf = self.parameter_file
+ pf.particle_types = tuple(set(pt for pt, fname in pfl))
+ pf.field_units.update(units)
+ # This is an attribute that means these particle types *actually*
+ # exist. As in, they are real, in the dataset.
+ pf.particle_types_raw = pf.particle_types
+
def _setup_geometry(self):
super(OWLSSubfindParticleIndex, self)._setup_geometry()
self._calculate_particle_index_starts()
diff -r 2d9d69de8ff79eb5e12c64a296da55e7004c63b2 -r fe9929e027bc53551d829befe29f7046be90ca83 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -118,7 +118,7 @@
# TODO: Add additional fields
pfl = []
units = {}
- for dom in self.data_files[:1]:
+ for dom in self.data_files:
fl, _units = self.io._identify_fields(dom)
units.update(_units)
dom._calculate_offsets(fl)
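
Net effect of the two hunks: the generic ParticleIndex goes back to
identifying fields from every data file, while this frontend inspects only
the first file, on the assumption that all files of one catalog share the
same field layout. A toy sketch of the trade-off (helper and file names
hypothetical):

    def identify_fields(filename):
        # Stand-in for io._identify_fields; here every file reports
        # the same layout.
        return [("FOF", "Mass"), ("SUBFIND", "Mass")]

    data_files = ["group_042.%d.hdf5" % i for i in range(8)]
    fields_fast = identify_fields(data_files[0])            # this frontend
    fields_full = sorted(set(f for fn in data_files
                               for f in identify_fields(fn)))  # superclass
    assert set(fields_fast) == set(fields_full)
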
https://bitbucket.org/yt_analysis/yt/commits/010875852530/
Changeset: 010875852530
Branch: yt-3.0
User: brittonsmith
Date: 2014-05-05 17:41:49
Summary: Removing the unnecessary manual 'cmcm' unit registration.
Affected #: 1 file
diff -r fe9929e027bc53551d829befe29f7046be90ca83 -r 01087585253088ec6e966a13808f3e4f6844bd78 yt/frontends/halo_catalogs/owls_subfind/data_structures.py
--- a/yt/frontends/halo_catalogs/owls_subfind/data_structures.py
+++ b/yt/frontends/halo_catalogs/owls_subfind/data_structures.py
@@ -159,8 +159,6 @@
self._unit_base = {}
self._unit_base.update(
(str(k), v) for k, v in handle["/Units"].attrs.items())
- # Comoving cm is given in the Units
- self._unit_base['cmcm'] = 1.0 / self._unit_base["UnitLength_in_cm"]
handle.close()
def _set_code_unit_attributes(self):
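
Presumably the manual 'cmcm' registration is redundant because yt's unit
registry already provides comoving and h-scaled unit symbols for
cosmological datasets; a sketch, with a hypothetical snapshot path:

    import yt

    pf = yt.load("snapshot_042.0.hdf5")   # hypothetical cosmological dataset
    r = pf.quan(1.0, "Mpccm/h")           # comoving Mpc, scaled by 1/h
    print(r.in_units("Mpc"))              # proper Mpc at the dataset redshift
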
https://bitbucket.org/yt_analysis/yt/commits/2baafb904700/
Changeset: 2baafb904700
Branch: yt-3.0
User: MatthewTurk
Date: 2014-05-05 17:48:37
Summary: Merged in brittonsmith/yt/yt-3.0 (pull request #867)
OWLSSubfind frontend.
Affected #: 8 files
diff -r 85cf74b2256cfe93057f6858619f5f2fdbf4e55f -r 2baafb904700f2eaccb6af2f6d545db6c2a286ca yt/frontends/halo_catalogs/api.py
--- a/yt/frontends/halo_catalogs/api.py
+++ b/yt/frontends/halo_catalogs/api.py
@@ -23,3 +23,8 @@
RockstarDataset, \
IOHandlerRockstarBinary, \
RockstarFieldInfo
+
+from .owls_subfind.api import \
+ OWLSSubfindDataset, \
+ IOHandlerOWLSSubfindHDF5, \
+ OWLSSubfindFieldInfo
diff -r 85cf74b2256cfe93057f6858619f5f2fdbf4e55f -r 2baafb904700f2eaccb6af2f6d545db6c2a286ca yt/frontends/halo_catalogs/owls_subfind/__init__.py
--- /dev/null
+++ b/yt/frontends/halo_catalogs/owls_subfind/__init__.py
@@ -0,0 +1,15 @@
+"""
+API for OWLSSubfind frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
diff -r 85cf74b2256cfe93057f6858619f5f2fdbf4e55f -r 2baafb904700f2eaccb6af2f6d545db6c2a286ca yt/frontends/halo_catalogs/owls_subfind/api.py
--- /dev/null
+++ b/yt/frontends/halo_catalogs/owls_subfind/api.py
@@ -0,0 +1,24 @@
+"""
+API for OWLSSubfind frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+ OWLSSubfindDataset
+
+from .io import \
+ IOHandlerOWLSSubfindHDF5
+
+from .fields import \
+ OWLSSubfindFieldInfo
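
With the re-exports above, the merged frontend is reachable from the
halo_catalogs package namespace:

    from yt.frontends.halo_catalogs.api import \
        OWLSSubfindDataset, IOHandlerOWLSSubfindHDF5, OWLSSubfindFieldInfo
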
diff -r 85cf74b2256cfe93057f6858619f5f2fdbf4e55f -r 2baafb904700f2eaccb6af2f6d545db6c2a286ca yt/frontends/halo_catalogs/owls_subfind/data_structures.py
--- /dev/null
+++ b/yt/frontends/halo_catalogs/owls_subfind/data_structures.py
@@ -0,0 +1,222 @@
+"""
+Data structures for OWLSSubfind frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from collections import defaultdict
+import h5py
+import numpy as np
+import stat
+import weakref
+import struct
+import glob
+import time
+import os
+
+from .fields import \
+ OWLSSubfindFieldInfo
+
+from yt.funcs import mylog
+from yt.utilities.cosmology import Cosmology
+from yt.utilities.definitions import \
+ mpc_conversion, sec_conversion
+from yt.utilities.exceptions import \
+ YTException
+from yt.geometry.particle_geometry_handler import \
+ ParticleIndex
+from yt.data_objects.static_output import \
+ Dataset, \
+ ParticleFile
+from yt.frontends.sph.data_structures import \
+ _fix_unit_ordering
+import yt.utilities.fortran_utils as fpu
+from yt.units.yt_array import \
+ YTArray, \
+ YTQuantity
+
+class OWLSSubfindParticleIndex(ParticleIndex):
+ def __init__(self, pf, dataset_type):
+ super(OWLSSubfindParticleIndex, self).__init__(pf, dataset_type)
+
+ def _calculate_particle_index_starts(self):
+ # Halo indices are not saved in the file, so we must count by hand.
+ # File 0 has halos 0 to N_0 - 1, file 1 has halos N_0 to N_0 + N_1 - 1, etc.
+ particle_count = defaultdict(int)
+ offset_count = 0
+ for data_file in self.data_files:
+ data_file.index_start = dict([(ptype, particle_count[ptype]) for
+ ptype in data_file.total_particles])
+ data_file.offset_start = offset_count
+ for ptype in data_file.total_particles:
+ particle_count[ptype] += data_file.total_particles[ptype]
+ offset_count += data_file.total_offset
+
+ def _calculate_file_offset_map(self):
+ # After the FOF is performed, a load-balancing step redistributes halos
+ # and then writes more fields. Here, for each file, we create a list of
+ # files which contain the rest of the redistributed particles.
+ ifof = np.array([data_file.total_particles["FOF"]
+ for data_file in self.data_files])
+ isub = np.array([data_file.total_offset
+ for data_file in self.data_files])
+ subend = isub.cumsum()
+ fofend = ifof.cumsum()
+ istart = np.digitize(fofend - ifof, subend - isub) - 1
+ iend = np.clip(np.digitize(fofend, subend), 0, ifof.size - 2)
+ for i, data_file in enumerate(self.data_files):
+ data_file.offset_files = self.data_files[istart[i]: iend[i] + 1]
+
+ def _detect_output_fields(self):
+ # TODO: Add additional fields
+ pfl = []
+ units = {}
+ for dom in self.data_files[:1]:
+ fl, _units = self.io._identify_fields(dom)
+ units.update(_units)
+ dom._calculate_offsets(fl)
+ for f in fl:
+ if f not in pfl: pfl.append(f)
+ self.field_list = pfl
+ pf = self.parameter_file
+ pf.particle_types = tuple(set(pt for pt, fname in pfl))
+ pf.field_units.update(units)
+ # This is an attribute that means these particle types *actually*
+ # exist. As in, they are real, in the dataset.
+ pf.particle_types_raw = pf.particle_types
+
+ def _setup_geometry(self):
+ super(OWLSSubfindParticleIndex, self)._setup_geometry()
+ self._calculate_particle_index_starts()
+ self._calculate_file_offset_map()
+
+class OWLSSubfindHDF5File(ParticleFile):
+ def __init__(self, pf, io, filename, file_id):
+ super(OWLSSubfindHDF5File, self).__init__(pf, io, filename, file_id)
+ with h5py.File(filename, "r") as f:
+ self.header = dict((field, f.attrs[field]) \
+ for field in f.attrs.keys())
+
+class OWLSSubfindDataset(Dataset):
+ _index_class = OWLSSubfindParticleIndex
+ _file_class = OWLSSubfindHDF5File
+ _field_info_class = OWLSSubfindFieldInfo
+ _suffix = ".hdf5"
+
+ def __init__(self, filename, dataset_type="subfind_hdf5",
+ n_ref = 16, over_refine_factor = 1):
+ self.n_ref = n_ref
+ self.over_refine_factor = over_refine_factor
+ super(OWLSSubfindDataset, self).__init__(filename, dataset_type)
+
+ def _parse_parameter_file(self):
+ handle = h5py.File(self.parameter_filename, mode="r")
+ hvals = {}
+ hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
+ hvals["NumFiles"] = hvals["NumFilesPerSnapshot"]
+ hvals["Massarr"] = hvals["MassTable"]
+
+ self.dimensionality = 3
+ self.refine_by = 2
+ self.unique_identifier = \
+ int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+
+ # Set standard values
+ self.current_time = self.quan(hvals["Time_GYR"] * sec_conversion["Gyr"], "s")
+ self.domain_left_edge = np.zeros(3, "float64")
+ self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
+ nz = 1 << self.over_refine_factor
+ self.domain_dimensions = np.ones(3, "int32") * nz
+ self.cosmological_simulation = 1
+ self.periodicity = (True, True, True)
+ self.current_redshift = hvals["Redshift"]
+ self.omega_lambda = hvals["OmegaLambda"]
+ self.omega_matter = hvals["Omega0"]
+ self.hubble_constant = hvals["HubbleParam"]
+ self.parameters = hvals
+ prefix = os.path.abspath(
+ os.path.join(os.path.dirname(self.parameter_filename),
+ os.path.basename(self.parameter_filename).split(".", 1)[0]))
+
+ suffix = self.parameter_filename.rsplit(".", 1)[-1]
+ self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix)
+ self.file_count = len(glob.glob(prefix + "*" + self._suffix))
+ if self.file_count == 0:
+ raise YTException(message="No data files found.", pf=self)
+ self.particle_types = ("FOF", "SUBFIND")
+ self.particle_types_raw = ("FOF", "SUBFIND")
+
+ # To avoid having to open files twice
+ self._unit_base = {}
+ self._unit_base.update(
+ (str(k), v) for k, v in handle["/Units"].attrs.items())
+ handle.close()
+
+ def _set_code_unit_attributes(self):
+ # Set a sane default for cosmological simulations.
+ if self._unit_base is None and self.cosmological_simulation == 1:
+ mylog.info("Assuming length units are in Mpc/h (comoving)")
+ self._unit_base = dict(length = (1.0, "Mpccm/h"))
+ # Otherwise, fall back to the same defaults used by the
+ # standard Gadget frontend.
+ unit_base = self._unit_base or {}
+ if "length" in unit_base:
+ length_unit = unit_base["length"]
+ elif "UnitLength_in_cm" in unit_base:
+ if self.cosmological_simulation == 0:
+ length_unit = (unit_base["UnitLength_in_cm"], "cm")
+ else:
+ length_unit = (unit_base["UnitLength_in_cm"], "cmcm/h")
+ else:
+ raise RuntimeError("No length unit found in unit_base.")
+ length_unit = _fix_unit_ordering(length_unit)
+ self.length_unit = self.quan(length_unit[0], length_unit[1])
+
+ unit_base = self._unit_base or {}
+ if "velocity" in unit_base:
+ velocity_unit = unit_base["velocity"]
+ elif "UnitVelocity_in_cm_per_s" in unit_base:
+ velocity_unit = (unit_base["UnitVelocity_in_cm_per_s"], "cm/s")
+ else:
+ velocity_unit = (1e5, "cm/s")
+ velocity_unit = _fix_unit_ordering(velocity_unit)
+ self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])
+ # We set hubble_constant = 1.0 for non-cosmology, so this is safe.
+ # Default to 1e10 Msun/h if mass is not specified.
+ if "mass" in unit_base:
+ mass_unit = unit_base["mass"]
+ elif "UnitMass_in_g" in unit_base:
+ if self.cosmological_simulation == 0:
+ mass_unit = (unit_base["UnitMass_in_g"], "g")
+ else:
+ mass_unit = (unit_base["UnitMass_in_g"], "g/h")
+ else:
+ # Sane default
+ mass_unit = (1.0, "1e10*Msun/h")
+ mass_unit = _fix_unit_ordering(mass_unit)
+ self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
+ self.time_unit = self.quan(unit_base["UnitTime_in_s"], "s")
+
+ @classmethod
+ def _is_valid(self, *args, **kwargs):
+ try:
+ fileh = h5py.File(args[0], mode='r')
+ if "Constants" in fileh["/"].keys() and \
+ "Header" in fileh["/"].keys() and \
+ "SUBFIND" in fileh["/"].keys():
+ fileh.close()
+ return True
+ fileh.close()
+ except:
+ pass
+ return False
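
The index arithmetic in _calculate_file_offset_map above is compact; a
standalone sketch with made-up per-file counts shows which files each file
must consult for its offset fields:

    import numpy as np

    # ifof[i]: FOF halos written in file i; isub[i]: offset-ordered
    # entries in file i (counts are made up).
    ifof = np.array([5, 3, 4])
    isub = np.array([6, 4, 2])
    subend = isub.cumsum()                # [ 6, 10, 12]
    fofend = ifof.cumsum()                # [ 5,  8, 12]
    istart = np.digitize(fofend - ifof, subend - isub) - 1
    iend = np.clip(np.digitize(fofend, subend), 0, ifof.size - 2)
    print(istart, iend)                   # [0 0 1] [0 1 1]
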
diff -r 85cf74b2256cfe93057f6858619f5f2fdbf4e55f -r 2baafb904700f2eaccb6af2f6d545db6c2a286ca yt/frontends/halo_catalogs/owls_subfind/fields.py
--- /dev/null
+++ b/yt/frontends/halo_catalogs/owls_subfind/fields.py
@@ -0,0 +1,58 @@
+"""
+OWLSSubfind-specific fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.funcs import mylog
+from yt.fields.field_info_container import \
+ FieldInfoContainer
+from yt.units.yt_array import \
+ YTArray
+
+m_units = "code_mass"
+mdot_units = "code_mass / code_time"
+p_units = "Mpccm/h"
+v_units = "1e5 * cmcm / s"
+
+class OWLSSubfindFieldInfo(FieldInfoContainer):
+ known_other_fields = (
+ )
+
+ known_particle_fields = (
+ ("CenterOfMass_0", (p_units, ["particle_position_x"], None)),
+ ("CenterOfMass_1", (p_units, ["particle_position_y"], None)),
+ ("CenterOfMass_2", (p_units, ["particle_position_z"], None)),
+ ("CenterOfMassVelocity_0", (v_units, ["particle_velocity_x"], None)),
+ ("CenterOfMassVelocity_1", (v_units, ["particle_velocity_y"], None)),
+ ("CenterOfMassVelocity_2", (v_units, ["particle_velocity_z"], None)),
+ ("Mass", (m_units, ["particle_mass"], None)),
+ ("Halo_M_Crit200", (m_units, ["Virial Mass"], None)),
+ ("Halo_M_Crit2500", (m_units, [], None)),
+ ("Halo_M_Crit500", (m_units, [], None)),
+ ("Halo_M_Mean200", (m_units, [], None)),
+ ("Halo_M_Mean2500", (m_units, [], None)),
+ ("Halo_M_Mean500", (m_units, [], None)),
+ ("Halo_M_TopHat200", (m_units, [], None)),
+ ("Halo_R_Crit200", (p_units, ["Virial Radius"], None)),
+ ("Halo_R_Crit2500", (p_units, [], None)),
+ ("Halo_R_Crit500", (p_units, [], None)),
+ ("Halo_R_Mean200", (p_units, [], None)),
+ ("Halo_R_Mean2500", (p_units, [], None)),
+ ("Halo_R_Mean500", (p_units, [], None)),
+ ("Halo_R_TopHat200", (p_units, [], None)),
+ ("BH_Mass", (m_units, [], None)),
+ ("Stars/Mass", (m_units, [], None)),
+ ("BH_Mdot", (mdot_units, [], None)),
+ ("StarFormationRate", (mdot_units, [], None)),
+)
diff -r 85cf74b2256cfe93057f6858619f5f2fdbf4e55f -r 2baafb904700f2eaccb6af2f6d545db6c2a286ca yt/frontends/halo_catalogs/owls_subfind/io.py
--- /dev/null
+++ b/yt/frontends/halo_catalogs/owls_subfind/io.py
@@ -0,0 +1,209 @@
+"""
+OWLSSubfind data-file handling function
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+
+from yt.utilities.exceptions import *
+from yt.funcs import mylog
+
+from yt.utilities.io_handler import \
+ BaseIOHandler
+
+from yt.utilities.lib.geometry_utils import compute_morton
+
+from yt.geometry.oct_container import _ORDER_MAX
+
+class IOHandlerOWLSSubfindHDF5(BaseIOHandler):
+ _dataset_type = "subfind_hdf5"
+
+ def __init__(self, pf):
+ super(IOHandlerOWLSSubfindHDF5, self).__init__(pf)
+ self.offset_fields = set([])
+
+ def _read_fluid_selection(self, chunks, selector, fields, size):
+ raise NotImplementedError
+
+ def _read_particle_coords(self, chunks, ptf):
+ # This will read chunks and yield the results.
+ chunks = list(chunks)
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ with h5py.File(data_file.filename, "r") as f:
+ for ptype, field_list in sorted(ptf.items()):
+ pcount = data_file.total_particles[ptype]
+ coords = f[ptype]["CenterOfMass"].value.astype("float64")
+ coords = np.resize(coords, (pcount, 3))
+ x = coords[:, 0]
+ y = coords[:, 1]
+ z = coords[:, 2]
+ yield ptype, (x, y, z)
+
+ def _read_offset_particle_field(self, field, data_file, fh):
+ field_data = np.empty(data_file.total_particles["FOF"], dtype="float64")
+ fofindex = np.arange(data_file.total_particles["FOF"]) + data_file.index_start["FOF"]
+ for offset_file in data_file.offset_files:
+ if fh.filename == offset_file.filename:
+ ofh = fh
+ else:
+ ofh = h5py.File(offset_file.filename, "r")
+ subindex = np.arange(offset_file.total_offset) + offset_file.offset_start
+ substart = max(fofindex[0] - subindex[0], 0)
+ subend = min(fofindex[-1] - subindex[0], subindex.size - 1)
+ fofstart = substart + subindex[0] - fofindex[0]
+ fofend = subend + subindex[0] - fofindex[0]
+ field_data[fofstart:fofend + 1] = ofh["SUBFIND"][field][substart:subend + 1]
+ return field_data
+
+ def _read_particle_fields(self, chunks, ptf, selector):
+ # Now we have all the sizes, and we can allocate
+ chunks = list(chunks)
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ with h5py.File(data_file.filename, "r") as f:
+ for ptype, field_list in sorted(ptf.items()):
+ pcount = data_file.total_particles[ptype]
+ coords = f[ptype]["CenterOfMass"].value.astype("float64")
+ coords = np.resize(coords, (pcount, 3))
+ x = coords[:, 0]
+ y = coords[:, 1]
+ z = coords[:, 2]
+ mask = selector.select_points(x, y, z)
+ del x, y, z
+ if mask is None: continue
+ for field in field_list:
+ if field in self.offset_fields:
+ field_data = \
+ self._read_offset_particle_field(field, data_file, f)
+ else:
+ if field == "particle_identifier":
+ field_data = \
+ np.arange(data_file.total_particles[ptype]) + \
+ data_file.index_start[ptype]
+ elif field in f[ptype]:
+ field_data = f[ptype][field].value.astype("float64")
+ else:
+ fname = field[:field.rfind("_")]
+ field_data = f[ptype][fname].value.astype("float64")
+ my_div = field_data.size / pcount
+ if my_div > 1:
+ field_data = np.resize(field_data, (pcount, my_div))
+ findex = int(field[field.rfind("_") + 1:])
+ field_data = field_data[:, findex]
+ data = field_data[mask]
+ yield (ptype, field), data
+
+ def _initialize_index(self, data_file, regions):
+ pcount = sum(self._count_particles(data_file).values())
+ morton = np.empty(pcount, dtype='uint64')
+ mylog.debug("Initializing index % 5i (% 7i particles)",
+ data_file.file_id, pcount)
+ ind = 0
+ with h5py.File(data_file.filename, "r") as f:
+ if not f.keys(): return None
+ dx = np.finfo(f["FOF"]['CenterOfMass'].dtype).eps
+ dx = 2.0*self.pf.quan(dx, "code_length")
+
+ for ptype, pattr in zip(["FOF", "SUBFIND"],
+ ["Number_of_groups", "Number_of_subgroups"]):
+ my_pcount = f[ptype].attrs[pattr]
+ pos = f[ptype]["CenterOfMass"].value.astype("float64")
+ pos = np.resize(pos, (my_pcount, 3))
+ pos = data_file.pf.arr(pos, "code_length")
+
+ # These are 32 bit numbers, so we give a little leeway.
+ # Otherwise, for big sets of particles, we will often bump into the
+ # domain edges. This helps alleviate that.
+ np.clip(pos, self.pf.domain_left_edge + dx,
+ self.pf.domain_right_edge - dx, pos)
+ if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
+ np.any(pos.max(axis=0) > self.pf.domain_right_edge):
+ raise YTDomainOverflow(pos.min(axis=0),
+ pos.max(axis=0),
+ self.pf.domain_left_edge,
+ self.pf.domain_right_edge)
+ regions.add_data_file(pos, data_file.file_id)
+ morton[ind:ind+pos.shape[0]] = compute_morton(
+ pos[:,0], pos[:,1], pos[:,2],
+ data_file.pf.domain_left_edge,
+ data_file.pf.domain_right_edge)
+ ind += pos.shape[0]
+ return morton
+
+ def _count_particles(self, data_file):
+ with h5py.File(data_file.filename, "r") as f:
+ # We need this to figure out where the offset fields are stored.
+ data_file.total_offset = f["SUBFIND"].attrs["Number_of_groups"]
+ return {"FOF": f["FOF"].attrs["Number_of_groups"],
+ "SUBFIND": f["FOF"].attrs["Number_of_subgroups"]}
+
+ def _identify_fields(self, data_file):
+ fields = [(ptype, "particle_identifier")
+ for ptype in self.pf.particle_types_raw]
+ pcount = data_file.total_particles
+ with h5py.File(data_file.filename, "r") as f:
+ for ptype in self.pf.particle_types_raw:
+ my_fields, my_offset_fields = \
+ subfind_field_list(f[ptype], ptype, data_file.total_particles)
+ fields.extend(my_fields)
+ self.offset_fields = self.offset_fields.union(set(my_offset_fields))
+ return fields, {}
+
+def subfind_field_list(fh, ptype, pcount):
+ fields = []
+ offset_fields = []
+ for field in fh.keys():
+ if "PartType" in field:
+ # These are halo member particles
+ continue
+ elif isinstance(fh[field], h5py.Group):
+ my_fields, my_offset_fields = \
+ subfind_field_list(fh[field], ptype, pcount)
+ fields.extend(my_fields)
+ offset_fields.extend(my_offset_fields)
+ else:
+ if not fh[field].size % pcount[ptype]:
+ my_div = fh[field].size / pcount[ptype]
+ fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
+ if my_div > 1:
+ for i in range(my_div):
+ fields.append((ptype, "%s_%d" % (fname, i)))
+ else:
+ fields.append((ptype, fname))
+ elif ptype == "SUBFIND" and \
+ not fh[field].size % fh["/SUBFIND"].attrs["Number_of_groups"]:
+ # These are actually FOF fields, but they were written after
+ # a load balancing step moved halos around and thus they do not
+ # correspond to the halos stored in the FOF group.
+ my_div = fh[field].size / fh["/SUBFIND"].attrs["Number_of_groups"]
+ fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
+ if my_div > 1:
+ for i in range(my_div):
+ fields.append(("FOF", "%s_%d" % (fname, i)))
+ else:
+ fields.append(("FOF", fname))
+ offset_fields.append(fname)
+ else:
+ mylog.warn("Cannot add field (%s, %s) with size %d." % \
+ (ptype, fh[field].name, fh[field].size))
+ continue
+ return fields, offset_fields
diff -r 85cf74b2256cfe93057f6858619f5f2fdbf4e55f -r 2baafb904700f2eaccb6af2f6d545db6c2a286ca yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -343,7 +343,8 @@
try:
fileh = h5py.File(args[0], mode='r')
if "Constants" in fileh["/"].keys() and \
- "Header" in fileh["/"].keys():
+ "Header" in fileh["/"].keys() and \
+ "SUBFIND" not in fileh["/"].keys():
fileh.close()
return True
fileh.close()
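
The final hunk is the counterpart of the catalog-side _is_valid: an
OWLS-style HDF5 file carrying a SUBFIND group is now claimed by
OWLSSubfindDataset, and one without it stays with the SPH snapshot frontend
(presumably OWLSDataset). A sketch of the resulting partition (hypothetical
helper, mirroring the two checks):

    import h5py

    def classify(filename):
        with h5py.File(filename, mode="r") as fileh:
            keys = fileh["/"].keys()
            if "Constants" in keys and "Header" in keys:
                if "SUBFIND" in keys:
                    return "OWLSSubfindDataset"  # halo catalog
                return "OWLSDataset"             # particle snapshot
        return None                              # claimed by neither check
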
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn mailing list