[yt-svn] commit/yt: 4 new changesets
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Fri Jan 10 10:12:36 PST 2014
4 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/ecee33799c2f/
Changeset: ecee33799c2f
Branch: yt-3.0
User: bcrosby
Date: 2014-01-10 17:19:15
Summary: First steps in adding _read_fluid_selection() to Enzo frontend for outputs in memory
Affected #: 1 file
diff -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 -r ecee33799c2f02baad66c5b775d00a178598bc52 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -240,6 +240,50 @@
# In-place unit conversion requires we return a copy
return tr.copy()
+ def _read_fluid_selection(self, chunks, selector, fields, size):
+ rv = {}
+ # Now we have to do something unpleasant
+ chunks = list(chunks)
+ if selector.__class__.__name__ == "GridSelector":
+ print "in the first case of _read_fluid_selection"
+ if not (len(chunks) == len(chunks[0].objs) == 1):
+ raise RuntimeError
+ g = chunks[0].objs[0]
+ for ftype, fname in fields:
+ rv[(ftype, fname)] = self.grids_in_memory[grid.id][fname].swapaxes(0,2)
+ return rv
+ if size is None:
+ size = sum((g.count(selector) for chunk in chunks
+ for g in chunk.objs))
+
+ # this probably fine as-is
+ for field in fields:
+ ftype, fname = field
+ fsize = size
+ rv[field] = np.empty(fsize, dtype="float64")
+ ng = sum(len(c.objs) for c in chunks)
+ mylog.debug("Reading %s cells of %s fields in %s grids",
+ size, [f2 for f1, f2 in fields], ng)
+
+ # not totally sure about this piece... is this needed if the output is in memory?
+ ind = 0
+ for chunk in chunks:
+ fid = None
+ for g in chunk.objs:
+ if g.filename is None: continue
+ if fid is None:
+ fid = h5py.h5f.open(g.filename, h5py.h5f.ACC_RDONLY)
+ data = np.empty(g.ActiveDimensions[::-1], dtype="float64")
+ data_view = data.swapaxes(0,2)
+ for field in fields:
+ ftype, fname = field
+ dg = h5py.h5d.open(fid, "/Grid%08i/%s" % (g.id, fname))
+ dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
+ nd = g.select(selector, data_view, rv[field], ind) # caches
+ ind += nd
+ if fid: fid.close()
+ return rv
+
@property
def _read_exception(self):
return KeyError
https://bitbucket.org/yt_analysis/yt/commits/3e4844effef2/
Changeset: 3e4844effef2
Branch: yt-3.0
User: bcrosby
Date: 2014-01-10 18:05:38
Summary: merged
Affected #: 6 files
diff -r ecee33799c2f02baad66c5b775d00a178598bc52 -r 3e4844effef2dcc0f93216890374d3554f819c33 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -264,7 +264,14 @@
def _get_hosts(self):
if self.comm.rank == 0 or self.comm.size == 1:
- server_address = socket.gethostname()
+
+ #Temporary mac hostname fix
+ try:
+ server_address = socket.gethostname()
+ socket.gethostbyname(server_address)
+ except socket.gaierror:
+ server_address = "localhost"
+
sock = socket.socket()
sock.bind(('', 0))
port = sock.getsockname()[-1]
diff -r ecee33799c2f02baad66c5b775d00a178598bc52 -r 3e4844effef2dcc0f93216890374d3554f819c33 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -39,12 +39,13 @@
f = h5py.File(grid.filename, "r")
group = f["/Grid%08i" % grid.id]
fields = []
+ add_io = "io" in grid.pf.particle_types
for name, v in group.iteritems():
# NOTE: This won't work with 1D datasets.
if not hasattr(v, "shape"):
continue
elif len(v.dims) == 1:
- fields.append( ("io", str(name)) )
+ if add_io: fields.append( ("io", str(name)) )
else:
fields.append( ("gas", str(name)) )
f.close()
diff -r ecee33799c2f02baad66c5b775d00a178598bc52 -r 3e4844effef2dcc0f93216890374d3554f819c33 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -100,26 +100,6 @@
data = p_fields[start:end, fi]
yield (ptype, field), data[mask]
- def _read_data_set(self, grid, field):
- f = self._handle
- f_part = self._particle_handle
- if field in self._particle_fields:
- if grid.NumberOfParticles == 0: return np.array([], dtype='float64')
- start = self.pf.h._particle_indices[grid.id - grid._id_offset]
- end = self.pf.h._particle_indices[grid.id - grid._id_offset + 1]
- fi = self._particle_fields[field]
- tr = f_part["/tracer particles"][start:end, fi]
- else:
- tr = f["/%s" % field][grid.id - grid._id_offset,:,:,:].transpose()
- return tr.astype("float64")
-
- def _read_data_slice(self, grid, field, axis, coord):
- sl = [slice(None), slice(None), slice(None)]
- sl[axis] = slice(coord, coord + 1)
- f = self._handle
- tr = f["/%s" % field][grid.id - grid._id_offset].transpose()[sl]
- return tr.astype("float64")
-
def _read_fluid_selection(self, chunks, selector, fields, size):
chunks = list(chunks)
if any((ftype != "gas" for ftype, fname in fields)):
@@ -129,8 +109,8 @@
for field in fields:
ftype, fname = field
dt = f["/%s" % fname].dtype
- if dt == "float32": dt = "float64"
- rv[field] = np.empty(size, dtype=dt)
+ # Always use *native* 64-bit float.
+ rv[field] = np.empty(size, dtype="=f8")
ng = sum(len(c.objs) for c in chunks)
mylog.debug("Reading %s cells of %s fields in %s blocks",
size, [f2 for f1, f2 in fields], ng)
@@ -161,6 +141,6 @@
end = gs[-1].id - gs[-1]._id_offset + 1
data = ds[start:end,:,:,:].transpose()
for i, g in enumerate(gs):
- rv[g.id][field] = data[...,i]
+ rv[g.id][field] = np.asarray(data[...,i], "=f8")
return rv
diff -r ecee33799c2f02baad66c5b775d00a178598bc52 -r 3e4844effef2dcc0f93216890374d3554f819c33 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -22,6 +22,7 @@
import glob
import time
import os
+import types
from yt.utilities.fortran_utils import read_record
from yt.utilities.logger import ytLogger as mylog
@@ -50,6 +51,10 @@
from yt.fields.particle_fields import \
particle_deposition_functions, \
standard_particle_fields
+from .definitions import \
+ gadget_header_specs, \
+ gadget_field_specs, \
+ gadget_ptype_specs
try:
import requests
@@ -147,29 +152,21 @@
_particle_coordinates_name = "Coordinates"
_particle_velocity_name = "Velocities"
_suffix = ""
- _header_spec = (('Npart', 6, 'i'),
- ('Massarr', 6, 'd'),
- ('Time', 1, 'd'),
- ('Redshift', 1, 'd'),
- ('FlagSfr', 1, 'i'),
- ('FlagFeedback', 1, 'i'),
- ('Nall', 6, 'i'),
- ('FlagCooling', 1, 'i'),
- ('NumFiles', 1, 'i'),
- ('BoxSize', 1, 'd'),
- ('Omega0', 1, 'd'),
- ('OmegaLambda', 1, 'd'),
- ('HubbleParam', 1, 'd'),
- ('FlagAge', 1, 'i'),
- ('FlagMEtals', 1, 'i'),
- ('NallHW', 6, 'i'),
- ('unused', 16, 'i'))
def __init__(self, filename, data_style="gadget_binary",
additional_fields=(),
unit_base=None, n_ref=64,
over_refine_factor=1,
- bounding_box = None):
+ bounding_box = None,
+ header_spec = "default",
+ field_spec = "default",
+ ptype_spec = "default"):
+ self._header_spec = self._setup_binary_spec(
+ header_spec, gadget_header_specs)
+ self._field_spec = self._setup_binary_spec(
+ field_spec, gadget_field_specs)
+ self._ptype_spec = self._setup_binary_spec(
+ ptype_spec, gadget_ptype_specs)
self.n_ref = n_ref
self.over_refine_factor = over_refine_factor
self.storage_filename = None
@@ -188,6 +185,14 @@
self.domain_left_edge = self.domain_right_edge = None
super(GadgetStaticOutput, self).__init__(filename, data_style)
+ def _setup_binary_spec(self, spec, spec_dict):
+ if isinstance(spec, types.StringTypes):
+ _hs = ()
+ for hs in spec.split("+"):
+ _hs += spec_dict[hs]
+ spec = _hs
+ return spec
+
def __repr__(self):
return os.path.basename(self.parameter_filename).split(".")[0]
diff -r ecee33799c2f02baad66c5b775d00a178598bc52 -r 3e4844effef2dcc0f93216890374d3554f819c33 yt/frontends/sph/definitions.py
--- a/yt/frontends/sph/definitions.py
+++ b/yt/frontends/sph/definitions.py
@@ -3,3 +3,46 @@
ghdf5_ptypes = ("PartType0", "PartType1", "PartType2", "PartType3",
"PartType4", "PartType5")
+gadget_header_specs = dict(
+ default = (('Npart', 6, 'i'),
+ ('Massarr', 6, 'd'),
+ ('Time', 1, 'd'),
+ ('Redshift', 1, 'd'),
+ ('FlagSfr', 1, 'i'),
+ ('FlagFeedback', 1, 'i'),
+ ('Nall', 6, 'i'),
+ ('FlagCooling', 1, 'i'),
+ ('NumFiles', 1, 'i'),
+ ('BoxSize', 1, 'd'),
+ ('Omega0', 1, 'd'),
+ ('OmegaLambda', 1, 'd'),
+ ('HubbleParam', 1, 'd'),
+ ('FlagAge', 1, 'i'),
+ ('FlagMEtals', 1, 'i'),
+ ('NallHW', 6, 'i'),
+ ('unused', 16, 'i')),
+ pad32 = (('empty', 32, 'c'),),
+ pad64 = (('empty', 64, 'c'),),
+ pad128 = (('empty', 128, 'c'),),
+ pad256 = (('empty', 256, 'c'),),
+)
+
+gadget_ptype_specs = dict(
+ default = ( "Gas",
+ "Halo",
+ "Disk",
+ "Bulge",
+ "Stars",
+ "Bndry" )
+)
+
+gadget_field_specs = dict(
+ default = ( "Coordinates",
+ "Velocities",
+ "ParticleIDs",
+ "Mass",
+ ("InternalEnergy", "Gas"),
+ ("Density", "Gas"),
+ ("SmoothingLength", "Gas"),
+ )
+)
diff -r ecee33799c2f02baad66c5b775d00a178598bc52 -r 3e4844effef2dcc0f93216890374d3554f819c33 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -174,12 +174,6 @@
_vector_fields = ("Coordinates", "Velocity", "Velocities")
# Particle types (Table 3 in GADGET-2 user guide)
- _ptypes = ( "Gas",
- "Halo",
- "Disk",
- "Bulge",
- "Stars",
- "Bndry" )
#
# Blocks in the file:
# HEAD
@@ -195,16 +189,12 @@
# ENDT (only if enabled in makefile)
# TSTP (only if enabled in makefile)
- _fields = ( "Coordinates",
- "Velocities",
- "ParticleIDs",
- "Mass",
- ("InternalEnergy", "Gas"),
- ("Density", "Gas"),
- ("SmoothingLength", "Gas"),
- )
+ _var_mass = None
- _var_mass = None
+ def __init__(self, pf, *args, **kwargs):
+ self._fields = pf._field_spec
+ self._ptypes = pf._ptype_spec
+ super(IOHandlerGadgetBinary, self).__init__(pf, *args, **kwargs)
@property
def var_mass(self):
https://bitbucket.org/yt_analysis/yt/commits/9d95b42e0e68/
Changeset: 9d95b42e0e68
Branch: yt-3.0
User: bcrosby
Date: 2014-01-10 18:28:36
Summary: First steps in fixing the Enzo frontend
Affected #: 1 file
diff -r 3e4844effef2dcc0f93216890374d3554f819c33 -r 9d95b42e0e686201a6361261b8d70aa3fc7a4055 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -246,7 +246,6 @@
# Now we have to do something unpleasant
chunks = list(chunks)
if selector.__class__.__name__ == "GridSelector":
- print "in the first case of _read_fluid_selection"
if not (len(chunks) == len(chunks[0].objs) == 1):
raise RuntimeError
g = chunks[0].objs[0]
@@ -257,7 +256,7 @@
size = sum((g.count(selector) for chunk in chunks
for g in chunk.objs))
- # this probably fine as-is
+ # this is probably fine as-is
for field in fields:
ftype, fname = field
fsize = size
https://bitbucket.org/yt_analysis/yt/commits/4ef4449dc0af/
Changeset: 4ef4449dc0af
Branch: yt-3.0
User: bcrosby
Date: 2014-01-10 19:07:35
Summary: _read_fluid_selection for enzo data in memory now uses the enzo module. Caching for field detection was removed.
Affected #: 2 files
diff -r 9d95b42e0e686201a6361261b8d70aa3fc7a4055 -r 4ef4449dc0af669eed5d6477f70487a7be779e00 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -595,20 +595,6 @@
_cached_field_list = None
_cached_derived_field_list = None
- def _detect_fields(self):
- if self.__class__._cached_field_list is None:
- EnzoHierarchy._detect_fields(self)
- self.__class__._cached_field_list = self.field_list
- else:
- self.field_list = self.__class__._cached_field_list
-
- def _setup_derived_fields(self):
- if self.__class__._cached_derived_field_list is None:
- EnzoHierarchy._setup_derived_fields(self)
- self.__class__._cached_derived_field_list = self.derived_field_list
- else:
- self.derived_field_list = self.__class__._cached_derived_field_list
-
def _generate_random_grids(self):
my_rank = self.comm.rank
my_grids = self.grids[self.grid_procs.ravel() == my_rank]
diff -r 9d95b42e0e686201a6361261b8d70aa3fc7a4055 -r 4ef4449dc0af669eed5d6477f70487a7be779e00 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -256,7 +256,6 @@
size = sum((g.count(selector) for chunk in chunks
for g in chunk.objs))
- # this is probably fine as-is
for field in fields:
ftype, fname = field
fsize = size
@@ -265,23 +264,17 @@
mylog.debug("Reading %s cells of %s fields in %s grids",
size, [f2 for f1, f2 in fields], ng)
- # not totally sure about this piece... is this needed if the output is in memory?
ind = 0
for chunk in chunks:
- fid = None
for g in chunk.objs:
- if g.filename is None: continue
- if fid is None:
- fid = h5py.h5f.open(g.filename, h5py.h5f.ACC_RDONLY)
+ if g.id not in self.grids_in_memory: continue
+
data = np.empty(g.ActiveDimensions[::-1], dtype="float64")
data_view = data.swapaxes(0,2)
for field in fields:
ftype, fname = field
- dg = h5py.h5d.open(fid, "/Grid%08i/%s" % (g.id, fname))
- dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
- nd = g.select(selector, data_view, rv[field], ind) # caches
- ind += nd
- if fid: fid.close()
+ data_view = self.grids_in_memory[g.id][fname]
+ nd = g.select(selector, data_view, rv[field], ind)
return rv
@property
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled and are the recipient of
this email.
More information about the yt-svn
mailing list