[yt-svn] commit/yt: 9 new changesets
commits-noreply at bitbucket.org
Thu Jul 16 10:08:51 PDT 2015
9 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/08dd5da61eb6/
Changeset: 08dd5da61eb6
Branch: yt
User: RicardaBeckmann
Date: 2015-06-05 11:09:07+00:00
Summary: updated .hgignore
Affected #: 14 files
diff -r ab1bd145878fe53d71c4d000422c9b7723af8c99 -r 08dd5da61eb6a1fd145a1f4046cb63269a142e40 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -63,3 +63,6 @@
doc/_temp/*
doc/source/bootcamp/.ipynb_checkpoints/
dist
+*~
+*#
+#*
diff -r ab1bd145878fe53d71c4d000422c9b7723af8c99 -r 08dd5da61eb6a1fd145a1f4046cb63269a142e40 yt/fields/particle_fields.py~
--- /dev/null
+++ b/yt/fields/particle_fields.py~
@@ -0,0 +1,576 @@
+"""
+These are common particle fields.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.funcs import *
+from yt.units.yt_array import YTArray
+from yt.fields.derived_field import \
+ ValidateParameter, \
+ ValidateSpatial
+from yt.utilities.physical_constants import \
+ mass_hydrogen_cgs, \
+ mass_sun_cgs, \
+ mh
+
+from yt.units.yt_array import \
+ uconcatenate
+
+from yt.utilities.math_utils import \
+ get_sph_r_component, \
+ get_sph_theta_component, \
+ get_sph_phi_component, \
+ get_cyl_r_component, \
+ get_cyl_z_component, \
+ get_cyl_theta_component, \
+ get_cyl_r, get_cyl_theta, \
+ get_cyl_z, get_sph_r, \
+ get_sph_theta, get_sph_phi, \
+ periodic_dist, euclidean_dist
+
+from .vector_operations import \
+ create_magnitude_field
+
+def _field_concat(fname):
+ def _AllFields(field, data):
+ v = []
+ for ptype in data.ds.particle_types:
+ data.ds._last_freq = (ptype, None)
+ if ptype == "all" or \
+ ptype in data.ds.known_filters:
+ continue
+ v.append(data[ptype, fname].copy())
+ rv = uconcatenate(v, axis=0)
+ return rv
+ return _AllFields
+
+def _field_concat_slice(fname, axi):
+ def _AllFields(field, data):
+ v = []
+ for ptype in data.ds.particle_types:
+ data.ds._last_freq = (ptype, None)
+ if ptype == "all" or \
+ ptype in data.ds.known_filters:
+ continue
+ v.append(data[ptype, fname][:,axi])
+ rv = uconcatenate(v, axis=0)
+ return rv
+ return _AllFields
+
+def particle_deposition_functions(ptype, coord_name, mass_name, registry):
+ orig = set(registry.keys())
+ ptype_dn = ptype.replace("_","\/").title()
+ def particle_count(field, data):
+ pos = data[ptype, coord_name]
+ d = data.deposit(pos, method = "count")
+ d = data.ds.arr(d, input_units = "cm**-3")
+ return data.apply_units(d, field.units)
+
+ registry.add_field(("deposit", "%s_count" % ptype),
+ function = particle_count,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{%s Count}" % ptype_dn)
+
+ def particle_mass(field, data):
+ pos = data[ptype, coord_name]
+ pmass = data[ptype, mass_name].in_units(field.units)
+ d = data.deposit(pos, [pmass], method = "sum")
+ return data.apply_units(d, field.units)
+
+ registry.add_field(("deposit", "%s_mass" % ptype),
+ function = particle_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{%s Mass}" % ptype_dn,
+ units = "g")
+
+ def particle_density(field, data):
+ pos = data[ptype, coord_name]
+ mass = data[ptype, mass_name]
+ pos.convert_to_units("code_length")
+ mass.convert_to_units("code_mass")
+ d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
+ d = data.ds.arr(d, "code_mass")
+ d /= data["index", "cell_volume"]
+ return d
+
+ registry.add_field(("deposit", "%s_density" % ptype),
+ function = particle_density,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{%s Density}" % ptype_dn,
+ units = "g/cm**3")
+
+ def particle_cic(field, data):
+ pos = data[ptype, coord_name]
+ d = data.deposit(pos, [data[ptype, mass_name]], method = "cic")
+ d = data.apply_units(d, data[ptype, mass_name].units)
+ d /= data["index", "cell_volume"]
+ return d
+
+ registry.add_field(("deposit", "%s_cic" % ptype),
+ function = particle_cic,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{%s CIC Density}" % ptype_dn,
+ units = "g/cm**3")
+
+ def _get_density_weighted_deposit_field(fname, units, method):
+ def _deposit_field(field, data):
+ """
+ Create a grid field for particle quantities weighted by particle
+ mass, using the given deposit method ("cic" or "sum").
+ """
+ pos = data[ptype, "particle_position"]
+ # Get back into density
+ pden = data[ptype, 'particle_mass']
+ top = data.deposit(pos, [data[(ptype, fname)]*pden], method=method)
+ bottom = data.deposit(pos, [pden], method=method)
+ top[bottom == 0] = 0.0
+ bnz = bottom.nonzero()
+ top[bnz] /= bottom[bnz]
+ d = data.ds.arr(top, input_units=units)
+ return d
+ return _deposit_field
+
+ for ax in 'xyz':
+ for method, name in zip(("cic", "sum"), ("cic", "nn")):
+ function = _get_density_weighted_deposit_field(
+ "particle_velocity_%s" % ax, "cm/s", method)
+ registry.add_field(
+ ("deposit", ("%s_"+name+"_velocity_%s") % (ptype, ax)),
+ function=function, units="cm/s", take_log=False,
+ validators=[ValidateSpatial(0)])
+
+ # Now some translation functions.
+
+ def particle_ones(field, data):
+ v = np.ones(data[ptype, mass_name].shape, dtype="float64")
+ return data.apply_units(v, field.units)
+
+ registry.add_field((ptype, "particle_ones"),
+ function = particle_ones,
+ particle_type = True,
+ units = "")
+
+ def particle_mesh_ids(field, data):
+ pos = data[ptype, coord_name]
+ ids = np.zeros(pos.shape[0], dtype="float64") - 1
+ # This is float64 in name only. It will be properly cast inside the
+ # deposit operation.
+ #_ids = ids.view("float64")
+ data.deposit(pos, [ids], method = "mesh_id")
+ return data.apply_units(ids, "")
+ registry.add_field((ptype, "mesh_id"),
+ function = particle_mesh_ids,
+ validators = [ValidateSpatial()],
+ particle_type = True)
+
+ return list(set(registry.keys()).difference(orig))
+
+def particle_scalar_functions(ptype, coord_name, vel_name, registry):
+
+ # Now we have to set up the various velocity and coordinate things. In the
+ # future, we'll actually invert this and use the 3-component items
+ # elsewhere, and stop using these.
+
+ # Note that we pass in _ptype here so that it's defined inside the closure.
+
+ def _get_coord_funcs(axi, _ptype):
+ def _particle_velocity(field, data):
+ return data[_ptype, vel_name][:,axi]
+ def _particle_position(field, data):
+ return data[_ptype, coord_name][:,axi]
+ return _particle_velocity, _particle_position
+ for axi, ax in enumerate("xyz"):
+ v, p = _get_coord_funcs(axi, ptype)
+ registry.add_field((ptype, "particle_velocity_%s" % ax),
+ particle_type = True, function = v,
+ units = "code_velocity")
+ registry.add_field((ptype, "particle_position_%s" % ax),
+ particle_type = True, function = p,
+ units = "code_length")
+
+def particle_vector_functions(ptype, coord_names, vel_names, registry):
+
+ # This will column_stack a set of scalars to create vector fields.
+
+ def _get_vec_func(_ptype, names):
+ def particle_vectors(field, data):
+ v = [data[_ptype, name].in_units(field.units)
+ for name in names]
+ c = np.column_stack(v)
+ return data.apply_units(c, field.units)
+ return particle_vectors
+ registry.add_field((ptype, "particle_position"),
+ function=_get_vec_func(ptype, coord_names),
+ units = "code_length",
+ particle_type=True)
+ registry.add_field((ptype, "particle_velocity"),
+ function=_get_vec_func(ptype, vel_names),
+ units = "cm / s",
+ particle_type=True)
+
+def standard_particle_fields(registry, ptype,
+ spos = "particle_position_%s",
+ svel = "particle_velocity_%s"):
+ # This function will set things up based on the scalar fields and standard
+ # yt field names.
+
+ def _particle_velocity_magnitude(field, data):
+ """ M{|v|} """
+ bulk_velocity = data.get_field_parameter("bulk_velocity")
+ if bulk_velocity is None:
+ bulk_velocity = np.zeros(3)
+ return np.sqrt((data[ptype, svel % 'x'] - bulk_velocity[0])**2
+ + (data[ptype, svel % 'y'] - bulk_velocity[1])**2
+ + (data[ptype, svel % 'z'] - bulk_velocity[2])**2 )
+
+ registry.add_field((ptype, "particle_velocity_magnitude"),
+ function=_particle_velocity_magnitude,
+ particle_type=True,
+ take_log=False,
+ units="cm/s")
+
+ def _particle_specific_angular_momentum(field, data):
+ """
+ Calculate the specific angular momentum of a particle. Returns a
+ vector for each particle.
+ """
+ if data.has_field_parameter("bulk_velocity"):
+ bv = data.get_field_parameter("bulk_velocity")
+ else: bv = np.zeros(3, dtype=np.float64)
+ xv = data[ptype, svel % 'x'] - bv[0]
+ yv = data[ptype, svel % 'y'] - bv[1]
+ zv = data[ptype, svel % 'z'] - bv[2]
+ center = data.get_field_parameter('center')
+ coords = YTArray([data[ptype, spos % 'x'],
+ data[ptype, spos % 'y'],
+ data[ptype, spos % 'z']], dtype=np.float64)
+ new_shape = tuple([3] + [1]*(len(coords.shape)-1))
+ r_vec = coords - np.reshape(center,new_shape)
+ v_vec = YTArray([xv,yv,zv], dtype=np.float64)
+ return np.cross(r_vec, v_vec, axis=0)
+
+ registry.add_field((ptype, "particle_specific_angular_momentum"),
+ function=_particle_specific_angular_momentum,
+ particle_type=True,
+ units="cm**2/s",
+ validators=[ValidateParameter("center")])
+
+ def _particle_specific_angular_momentum_x(field, data):
+ if data.has_field_parameter("bulk_velocity"):
+ bv = data.get_field_parameter("bulk_velocity")
+ else: bv = np.zeros(3, dtype=np.float64)
+ center = data.get_field_parameter('center')
+ y = data[ptype, spos % "y"] - center[1]
+ z = data[ptype, spos % "z"] - center[2]
+ yv = data[ptype, svel % "y"] - bv[1]
+ zv = data[ptype, svel % "z"] - bv[2]
+ return yv*z - zv*y
+
+ registry.add_field((ptype, "particle_specific_angular_momentum_x"),
+ function=_particle_specific_angular_momentum_x,
+ particle_type=True,
+ units="cm**2/s",
+ validators=[ValidateParameter("center")])
+
+ def _particle_specific_angular_momentum_y(field, data):
+ if data.has_field_parameter("bulk_velocity"):
+ bv = data.get_field_parameter("bulk_velocity")
+ else: bv = np.zeros(3, dtype=np.float64)
+ center = data.get_field_parameter('center')
+ x = data[ptype, spos % "x"] - center[0]
+ z = data[ptype, spos % "z"] - center[2]
+ xv = data[ptype, svel % "x"] - bv[0]
+ zv = data[ptype, svel % "z"] - bv[2]
+ return -(xv*z - zv*x)
+
+ registry.add_field((ptype, "particle_specific_angular_momentum_y"),
+ function=_particle_specific_angular_momentum_y,
+ particle_type=True,
+ units="cm**2/s",
+ validators=[ValidateParameter("center")])
+
+ def _particle_specific_angular_momentum_z(field, data):
+ if data.has_field_parameter("bulk_velocity"):
+ bv = data.get_field_parameter("bulk_velocity")
+ else: bv = np.zeros(3, dtype=np.float64)
+ center = data.get_field_parameter('center')
+ x = data[ptype, spos % "x"] - center[0]
+ y = data[ptype, spos % "y"] - center[1]
+ xv = data[ptype, svel % "x"] - bv[0]
+ yv = data[ptype, svel % "y"] - bv[1]
+ return xv*y - yv*x
+
+ registry.add_field((ptype, "particle_specific_angular_momentum_z"),
+ function=_particle_specific_angular_momentum_z,
+ particle_type=True,
+ units="cm**2/s",
+ validators=[ValidateParameter("center")])
+
+ create_magnitude_field(registry, "particle_specific_angular_momentum",
+ "cm**2/s", ftype=ptype, particle_type=True)
+
+ def _particle_angular_momentum_x(field, data):
+ return data[ptype, "particle_mass"] * \
+ data[ptype, "particle_specific_angular_momentum_x"]
+ registry.add_field((ptype, "particle_angular_momentum_x"),
+ function=_particle_angular_momentum_x,
+ units="g*cm**2/s", particle_type=True,
+ validators=[ValidateParameter('center')])
+
+ def _particle_angular_momentum_y(field, data):
+ return data[ptype, "particle_mass"] * \
+ data[ptype, "particle_specific_angular_momentum_y"]
+ registry.add_field((ptype, "particle_angular_momentum_y"),
+ function=_particle_angular_momentum_y,
+ units="g*cm**2/s", particle_type=True,
+ validators=[ValidateParameter('center')])
+
+ def _particle_angular_momentum_z(field, data):
+ return data[ptype, "particle_mass"] * \
+ data[ptype, "particle_specific_angular_momentum_z"]
+ registry.add_field((ptype, "particle_angular_momentum_z"),
+ function=_particle_angular_momentum_z,
+ units="g*cm**2/s", particle_type=True,
+ validators=[ValidateParameter('center')])
+
+ def _particle_angular_momentum(field, data):
+ return data[ptype, "particle_mass"] \
+ * data[ptype, "particle_specific_angular_momentum"]
+ registry.add_field((ptype, "particle_angular_momentum"),
+ function=_particle_angular_momentum,
+ particle_type=True,
+ units="g*cm**2/s",
+ validators=[ValidateParameter("center")])
+
+ create_magnitude_field(registry, "particle_angular_momentum",
+ "g*cm**2/s", ftype=ptype, particle_type=True)
+
+ from .field_functions import \
+ get_radius
+
+ def _particle_radius(field, data):
+ return get_radius(data, "particle_position_")
+ registry.add_field((ptype, "particle_radius"),
+ function=_particle_radius,
+ validators=[ValidateParameter("center")],
+ units="cm", particle_type = True,
+ display_name = "Particle Radius")
+
+ def _particle_spherical_position_radius(field, data):
+ """
+ Radial component of the particles' position vectors in spherical
+ coords, based on the provided field parameters for 'normal',
+ 'center', and 'bulk_velocity'.
+ """
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = spos
+ pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
+ pos = pos - np.reshape(center, (3, 1))
+ sphr = get_sph_r_component(pos, theta, phi, normal)
+ return sphr
+
+ registry.add_field((ptype, "particle_spherical_position_radius"),
+ function=_particle_spherical_position_radius,
+ particle_type=True, units="cm",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+ def _particle_spherical_position_theta(field, data):
+ """
+ Theta component of the particles' position vectors in spherical
+ coords, based on the provided field parameters for 'normal',
+ 'center', and 'bulk_velocity'.
+ """
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = spos
+ pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
+ pos = pos - np.reshape(center, (3, 1))
+ spht = get_sph_theta_component(pos, theta, phi, normal)
+ return spht
+
+ registry.add_field((ptype, "particle_spherical_position_theta"),
+ function=_particle_spherical_position_theta,
+ particle_type=True, units="cm",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+ def _particle_spherical_position_phi(field, data):
+ """
+ Phi component of the particles' position vectors in spherical
+ coords, based on the provided field parameters for 'normal',
+ 'center', and 'bulk_velocity'.
+ """
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = spos
+ pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
+ pos = pos - np.reshape(center, (3, 1))
+ sphp = get_sph_phi_component(pos, phi, normal)
+ return sphp
+
+ registry.add_field((ptype, "particle_spherical_position_phi"),
+ function=_particle_spherical_position_phi,
+ particle_type=True, units="cm",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+ def _particle_spherical_velocity_radius(field, data):
+ """
+ Radial component of the particles' velocity vectors in spherical coords
+ based on the provided field parameters for 'normal', 'center', and
+ 'bulk_velocity'.
+ """
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = spos
+ pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
+ vel = svel
+ vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
+ pos = pos - np.reshape(center, (3, 1))
+ vel = vel - np.reshape(bv, (3, 1))
+ sphr = get_sph_r_component(vel, theta, phi, normal)
+ return sphr
+
+ registry.add_field((ptype, "particle_spherical_velocity_radius"),
+ function=_particle_spherical_velocity_radius,
+ particle_type=True, units="cm/s",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+ # This is simply aliased to "particle_spherical_velocity_radius"
+ # for ease of use.
+ registry.add_field((ptype, "particle_radial_velocity"),
+ function=_particle_spherical_velocity_radius,
+ particle_type=True, units="cm/s",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+ def _particle_spherical_velocity_theta(field, data):
+ """
+ Theta component of the particles' velocity vectors in spherical coords
+ based on the provided field parameters for 'normal', 'center', and
+ 'bulk_velocity'.
+ """
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = spos
+ pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
+ vel = svel
+ vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
+ pos = pos - np.reshape(center, (3, 1))
+ vel = vel - np.reshape(bv, (3, 1))
+ spht = get_sph_theta_component(vel, theta, phi, normal)
+ return spht
+
+ registry.add_field((ptype, "particle_spherical_velocity_theta"),
+ function=_particle_spherical_velocity_theta,
+ particle_type=True, units="cm/s",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+ def _particle_spherical_velocity_phi(field, data):
+ """
+ Phi component of the particles' velocity vectors in spherical coords
+ based on the provided field parameters for 'normal', 'center', and
+ 'bulk_velocity'.
+ """
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+ vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
+ pos = pos - np.reshape(center, (3, 1))
+ vel = vel - np.reshape(bv, (3, 1))
+ sphp = get_sph_phi_component(vel, phi, normal)
+ return sphp
+
+ registry.add_field((ptype, "particle_spherical_velocity_phi"),
+ function=_particle_spherical_velocity_phi,
+ particle_type=True, units="cm/s",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+def add_particle_average(registry, ptype, field_name,
+ weight = "particle_mass",
+ density = True):
+ field_units = registry[ptype, field_name].units
+ def _pfunc_avg(field, data):
+ pos = data[ptype, "particle_position"]
+ f = data[ptype, field_name]
+ wf = data[ptype, weight]
+ f *= wf
+ v = data.deposit(pos, [f], method = "sum")
+ w = data.deposit(pos, [wf], method = "sum")
+ v /= w
+ if density: v /= data["index", "cell_volume"]
+ v[np.isnan(v)] = 0.0
+ return v
+ fn = ("deposit", "%s_avg_%s" % (ptype, field_name))
+ registry.add_field(fn, function=_pfunc_avg,
+ validators = [ValidateSpatial(0)],
+ particle_type = False,
+ units = field_units)
+ return fn
+
+def add_volume_weighted_smoothed_field(ptype, coord_name, mass_name,
+ smoothing_length_name, density_name, smoothed_field, registry,
+ nneighbors = None):
+ field_name = ("deposit", "%s_smoothed_%s" % (ptype, smoothed_field))
+ field_units = registry[ptype, smoothed_field].units
+ def _vol_weight(field, data):
+ pos = data[ptype, coord_name].in_units("code_length")
+ mass = data[ptype, mass_name].in_cgs()
+ dens = data[ptype, density_name].in_cgs()
+ quan = data[ptype, smoothed_field].in_units(field_units)
+ if smoothing_length_name is None:
+ hsml = np.zeros(quan.shape, dtype='float64') - 1
+ hsml = data.apply_units(hsml, "code_length")
+ else:
+ hsml = data[ptype, smoothing_length_name].in_units("code_length")
+ kwargs = {}
+ if nneighbors:
+ kwargs['nneighbors'] = nneighbors
+ rv = data.smooth(pos, [mass, hsml, dens, quan],
+ method="volume_weighted",
+ create_octree = True)[0]
+ rv[np.isnan(rv)] = 0.0
+ # Now some quick unit conversions.
+ rv = data.apply_units(rv, field_units)
+ return rv
+ registry.add_field(field_name, function = _vol_weight,
+ validators = [ValidateSpatial(0)],
+ units = field_units)
+ return [field_name]
+
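For context, a minimal usage sketch of the deposition fields registered
above (the output path and the "all" particle type are hypothetical
placeholders; this is not part of the changeset's code):

import yt

ds = yt.load("output_00080/info_00080.txt")
ad = ds.all_data()

# particle_deposition_functions() registers fields named
# ("deposit", "<ptype>_<quantity>"):
counts = ad["deposit", "all_count"]    # particles per cell
rho = ad["deposit", "all_density"]     # deposited mass / cell volume, g/cm**3
print rho.units, rho.shape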
diff -r ab1bd145878fe53d71c4d000422c9b7723af8c99 -r 08dd5da61eb6a1fd145a1f4046cb63269a142e40 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -491,13 +491,21 @@
"""
Generates the conversion to various physical units based on the parameter file
"""
- # Note that unit_l *already* converts to proper!
- # Also note that unit_l must be multiplied by the boxlen parameter to
- # ensure we are correctly set up for the current domain.
- length_unit = self.parameters['unit_l']
- boxlen = self.parameters['boxlen']
- density_unit = self.parameters['unit_d']
- mass_unit = density_unit * (length_unit * boxlen)**3
+ # Please note that for all units given in the info file, the boxlen
+ # still needs to be folded in, as shown below!
+
+ length_unit = self.parameters['unit_l'] * self.parameters['boxlen']
+ rho_u = self.parameters['unit_d'] / self.parameters['boxlen']**3
+
+ # In the mass unit, the factors of boxlen cancel back out, so this
+ # is equivalent to unit_d*unit_l**3.
+
+ mass_unit = rho_u * length_unit**3
+
+ # Cosmological runs are done in lookback conformal time.
+ # To convert to proper time, the time unit is calculated from
+ # the expansion factor. This is not yet done here!
+
time_unit = self.parameters['unit_t']
magnetic_unit = np.sqrt(4*np.pi * mass_unit /
(time_unit**2 * length_unit))
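A quick arithmetic check of the boxlen cancellation noted in this hunk,
with made-up parameter values purely for illustration:

# Made-up parameter values, for illustration only.
unit_l = 3.0857e24   # cm
unit_d = 2.0e-30     # g/cm**3
boxlen = 100.0

length_unit = unit_l * boxlen
rho_u = unit_d / boxlen**3
mass_unit = rho_u * length_unit**3

# The boxlen**3 in length_unit**3 cancels the 1/boxlen**3 in rho_u:
assert abs(mass_unit - unit_d * unit_l**3) < 1e-10 * mass_unit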
diff -r ab1bd145878fe53d71c4d000422c9b7723af8c99 -r 08dd5da61eb6a1fd145a1f4046cb63269a142e40 yt/frontends/ramses/data_structures.py.orig
--- /dev/null
+++ b/yt/frontends/ramses/data_structures.py.orig
@@ -0,0 +1,596 @@
+"""
+RAMSES-specific data structures
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import numpy as np
+import stat
+import weakref
+import cStringIO
+<<<<<<< local
+import scipy.integrate
+
+=======
+>>>>>>> other
+from yt.funcs import *
+from yt.geometry.oct_geometry_handler import \
+ OctreeIndex
+from yt.geometry.geometry_handler import \
+ Index, YTDataChunk
+from yt.data_objects.static_output import \
+ Dataset
+from yt.data_objects.octree_subset import \
+ OctreeSubset
+
+from .definitions import ramses_header, field_aliases
+from yt.utilities.lib.misc_utilities import \
+ get_box_grids_level
+from yt.utilities.io_handler import \
+ io_registry
+from yt.utilities.physical_constants import mp, kb
+from .fields import \
+ RAMSESFieldInfo, _X
+import yt.utilities.fortran_utils as fpu
+from yt.geometry.oct_container import \
+ RAMSESOctreeContainer
+from yt.fields.particle_fields import \
+ standard_particle_fields
+
+class RAMSESDomainFile(object):
+ _last_mask = None
+ _last_selector_id = None
+
+ def __init__(self, ds, domain_id):
+ self.ds = ds
+ self.domain_id = domain_id
+ self.nvar = 0 # Set this later!
+ num = os.path.basename(ds.parameter_filename).split("."
+ )[0].split("_")[1]
+ basename = "%s/%%s_%s.out%05i" % (
+ os.path.abspath(
+ os.path.dirname(ds.parameter_filename)),
+ num, domain_id)
+ for t in ['grav', 'hydro', 'part', 'amr']:
+ setattr(self, "%s_fn" % t, basename % t)
+ self._read_amr_header()
+ self._read_hydro_header()
+ self._read_particle_header()
+ self._read_amr()
+
+ _hydro_offset = None
+ _level_count = None
+
+ def __repr__(self):
+ return "RAMSESDomainFile: %i" % self.domain_id
+
+ def _is_hydro(self):
+ '''
+ Does the output include hydro?
+ '''
+ return os.path.exists(self.hydro_fn)
+
+ @property
+ def level_count(self):
+ if self._level_count is not None: return self._level_count
+ self.hydro_offset
+ return self._level_count
+
+ @property
+ def hydro_offset(self):
+ if self._hydro_offset is not None: return self._hydro_offset
+ # We now have to open the file and calculate it
+ f = open(self.hydro_fn, "rb")
+ fpu.skip(f, 6)
+ # It goes: level, CPU, 8-variable
+ min_level = self.ds.min_level
+ n_levels = self.amr_header['nlevelmax'] - min_level
+ hydro_offset = np.zeros(n_levels, dtype='int64')
+ hydro_offset -= 1
+ level_count = np.zeros(n_levels, dtype='int64')
+ skipped = []
+ for level in range(self.amr_header['nlevelmax']):
+ for cpu in range(self.amr_header['nboundary'] +
+ self.amr_header['ncpu']):
+ header = ( ('file_ilevel', 1, 'I'),
+ ('file_ncache', 1, 'I') )
+ try:
+ hvals = fpu.read_attrs(f, header, "=")
+ except AssertionError:
+ print "You are running with the wrong number of fields."
+ print "If you specified these in the load command, check the array length."
+ print "In this file there are %s hydro fields." % skipped
+ #print "The last set of field sizes was: %s" % skipped
+ raise
+ if hvals['file_ncache'] == 0: continue
+ assert(hvals['file_ilevel'] == level+1)
+ if cpu + 1 == self.domain_id and level >= min_level:
+ hydro_offset[level - min_level] = f.tell()
+ level_count[level - min_level] = hvals['file_ncache']
+ skipped = fpu.skip(f, 8 * self.nvar)
+ self._hydro_offset = hydro_offset
+ self._level_count = level_count
+ return self._hydro_offset
+
+ def _read_hydro_header(self):
+ # If no hydro file is found, return
+ if not self._is_hydro():
+ return
+ if self.nvar > 0: return self.nvar
+ # Read the number of hydro variables
+ f = open(self.hydro_fn, "rb")
+ fpu.skip(f, 1)
+ self.nvar = fpu.read_vector(f, "i")[0]
+
+ def _read_particle_header(self):
+ if not os.path.exists(self.part_fn):
+ self.local_particle_count = 0
+ self.particle_field_offsets = {}
+ return
+ f = open(self.part_fn, "rb")
+ f.seek(0, os.SEEK_END)
+ flen = f.tell()
+ f.seek(0)
+ hvals = {}
+ attrs = ( ('ncpu', 1, 'I'),
+ ('ndim', 1, 'I'),
+ ('npart', 1, 'I') )
+ hvals.update(fpu.read_attrs(f, attrs))
+ fpu.read_vector(f, 'I')
+
+ attrs = ( ('nstar_tot', 1, 'I'),
+ ('mstar_tot', 1, 'd'),
+ ('mstar_lost', 1, 'd'),
+ ('nsink', 1, 'I') )
+ hvals.update(fpu.read_attrs(f, attrs))
+ self.particle_header = hvals
+ self.local_particle_count = hvals['npart']
+ particle_fields = [
+ ("particle_position_x", "d"),
+ ("particle_position_y", "d"),
+ ("particle_position_z", "d"),
+ ("particle_velocity_x", "d"),
+ ("particle_velocity_y", "d"),
+ ("particle_velocity_z", "d"),
+ ("particle_mass", "d"),
+ ("particle_identifier", "I"),
+ ("particle_refinement_level", "I")]
+ if hvals["nstar_tot"] > 0:
+ particle_fields += [("particle_age", "d"),
+ ("particle_metallicity", "d")]
+
+ field_offsets = {}
+ _pfields = {}
+ for field, vtype in particle_fields:
+ if f.tell() >= flen: break
+ field_offsets["io", field] = f.tell()
+ _pfields["io", field] = vtype
+ fpu.skip(f, 1)
+ self.particle_field_offsets = field_offsets
+ self.particle_field_types = _pfields
+ self.particle_types = self.particle_types_raw = ("io",)
+
+ def _read_amr_header(self):
+ hvals = {}
+ f = open(self.amr_fn, "rb")
+ for header in ramses_header(hvals):
+ hvals.update(fpu.read_attrs(f, header))
+ # That's the header, now we skip a few.
+ hvals['numbl'] = np.array(hvals['numbl']).reshape(
+ (hvals['nlevelmax'], hvals['ncpu']))
+ fpu.skip(f)
+ if hvals['nboundary'] > 0:
+ fpu.skip(f, 2)
+ self.ngridbound = fpu.read_vector(f, 'i').astype("int64")
+ else:
+ self.ngridbound = np.zeros(hvals['nlevelmax'], dtype='int64')
+ free_mem = fpu.read_attrs(f, (('free_mem', 5, 'i'), ) )
+ ordering = fpu.read_vector(f, 'c')
+ fpu.skip(f, 4)
+ # Now we're at the tree itself
+ # Now we iterate over each level and each CPU.
+ self.amr_header = hvals
+ self.amr_offset = f.tell()
+ self.local_oct_count = hvals['numbl'][self.ds.min_level:, self.domain_id - 1].sum()
+ self.total_oct_count = hvals['numbl'][self.ds.min_level:,:].sum(axis=0)
+
+ def _read_amr(self):
+ """Open the oct file, read in octs level-by-level.
+ For each oct, only the position, index, level and domain
+ are needed - its position in the octree is found automatically.
+ The most important is finding all the information to feed
+ oct_handler.add
+ """
+ self.oct_handler = RAMSESOctreeContainer(self.ds.domain_dimensions/2,
+ self.ds.domain_left_edge, self.ds.domain_right_edge)
+ root_nodes = self.amr_header['numbl'][self.ds.min_level,:].sum()
+ self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
+ fb = open(self.amr_fn, "rb")
+ fb.seek(self.amr_offset)
+ f = cStringIO.StringIO()
+ f.write(fb.read())
+ f.seek(0)
+ mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
+ self.domain_id, self.total_oct_count.sum(), self.ngridbound.sum())
+ def _ng(c, l):
+ if c < self.amr_header['ncpu']:
+ ng = self.amr_header['numbl'][l, c]
+ else:
+ ng = self.ngridbound[c - self.amr_header['ncpu'] +
+ self.amr_header['nboundary']*l]
+ return ng
+ min_level = self.ds.min_level
+ # The yt max level is not the same as the RAMSES one: it is the
+ # maximum number of additional refinement levels, so for a unigrid
+ # run with no refinement it would be 0.
+ # We initially assume that.
+ max_level = 0
+ nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
+ for level in range(self.amr_header['nlevelmax']):
+ # Easier if do this 1-indexed
+ for cpu in range(self.amr_header['nboundary'] + self.amr_header['ncpu']):
+ #ng is the number of octs on this level on this domain
+ ng = _ng(cpu, level)
+ if ng == 0: continue
+ ind = fpu.read_vector(f, "I").astype("int64")
+ fpu.skip(f, 2)
+ pos = np.empty((ng, 3), dtype='float64')
+ pos[:,0] = fpu.read_vector(f, "d") - nx
+ pos[:,1] = fpu.read_vector(f, "d") - ny
+ pos[:,2] = fpu.read_vector(f, "d") - nz
+ #pos *= self.ds.domain_width
+ #pos += self.dataset.domain_left_edge
+ fpu.skip(f, 31)
+ #parents = fpu.read_vector(f, "I")
+ #fpu.skip(f, 6)
+ #children = np.empty((ng, 8), dtype='int64')
+ #for i in range(8):
+ # children[:,i] = fpu.read_vector(f, "I")
+ #cpu_map = np.empty((ng, 8), dtype="int64")
+ #for i in range(8):
+ # cpu_map[:,i] = fpu.read_vector(f, "I")
+ #rmap = np.empty((ng, 8), dtype="int64")
+ #for i in range(8):
+ # rmap[:,i] = fpu.read_vector(f, "I")
+ # We don't want duplicate grids.
+ # Note that we're adding *grids*, not individual cells.
+ if level >= min_level:
+ assert(pos.shape[0] == ng)
+ n = self.oct_handler.add(cpu + 1, level - min_level, pos,
+ count_boundary = 1)
+ self._error_check(cpu, level, pos, n, ng, (nx, ny, nz))
+ if n > 0: max_level = max(level - min_level, max_level)
+ self.max_level = max_level
+ self.oct_handler.finalize()
+
+ def _error_check(self, cpu, level, pos, n, ng, nn):
+ # NOTE: We have the second conditional here because internally, it will
+ # not add any octs in that case.
+ if n == ng or cpu + 1 > self.oct_handler.num_domains:
+ return
+ # This is where we now check for issues with creating the new octs, and
+ # we attempt to determine what precisely is going wrong.
+ # These are all print statements.
+ print "We have detected an error with the construction of the Octree."
+ print " The number of Octs to be added : %s" % ng
+ print " The number of Octs added : %s" % n
+ print " Level : %s" % level
+ print " CPU Number (0-indexed) : %s" % cpu
+ for i, ax in enumerate('xyz'):
+ print " extent [%s] : %s %s" % \
+ (ax, pos[:,i].min(), pos[:,i].max())
+ print " domain left : %s" % \
+ (self.ds.domain_left_edge,)
+ print " domain right : %s" % \
+ (self.ds.domain_right_edge,)
+ print " offset applied : %s %s %s" % \
+ (nn[0], nn[1], nn[2])
+ print "AMR Header:"
+ for key in sorted(self.amr_header):
+ print " %-30s: %s" % (key, self.amr_header[key])
+ raise RuntimeError
+
+ def included(self, selector):
+ if getattr(selector, "domain_id", None) is not None:
+ return selector.domain_id == self.domain_id
+ domain_ids = self.oct_handler.domain_identify(selector)
+ return self.domain_id in domain_ids
+
+class RAMSESDomainSubset(OctreeSubset):
+
+ _domain_offset = 1
+
+ def fill(self, content, fields, selector):
+ # Here we get a copy of the file, which we skip through and read the
+ # bits we want.
+ oct_handler = self.oct_handler
+ all_fields = self.domain.ds.index.fluid_field_list
+ fields = [f for ft, f in fields]
+ tr = {}
+ cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
+ levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
+ selector, self.domain_id, cell_count)
+ for field in fields:
+ tr[field] = np.zeros(cell_count, 'float64')
+ for level, offset in enumerate(self.domain.hydro_offset):
+ if offset == -1: continue
+ content.seek(offset)
+ nc = self.domain.level_count[level]
+ temp = {}
+ for field in all_fields:
+ temp[field] = np.empty((nc, 8), dtype="float64")
+ for i in range(8):
+ for field in all_fields:
+ if field not in fields:
+ fpu.skip(content)
+ else:
+ temp[field][:,i] = fpu.read_vector(content, 'd') # cell 1
+ oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, temp)
+ return tr
+
+class RAMSESIndex(OctreeIndex):
+
+ def __init__(self, ds, dataset_type='ramses'):
+ self.fluid_field_list = ds._fields_in_file
+ self.dataset_type = dataset_type
+ self.dataset = weakref.proxy(ds)
+ self.index_filename = self.dataset.parameter_filename
+ self.directory = os.path.dirname(self.index_filename)
+ self.max_level = None
+
+ self.float_type = np.float64
+ super(RAMSESIndex, self).__init__(ds, dataset_type)
+
+ def _initialize_oct_handler(self):
+ self.domains = [RAMSESDomainFile(self.dataset, i + 1)
+ for i in range(self.dataset['ncpu'])]
+ total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
+ for dom in self.domains)
+ self.max_level = max(dom.max_level for dom in self.domains)
+ self.num_grids = total_octs
+
+ def _detect_output_fields(self):
+ # Do we want to attempt to figure out what the fields are in the file?
+ dsl = set([])
+ if self.fluid_field_list is None or len(self.fluid_field_list) <= 0:
+ self._setup_auto_fields()
+ for domain in self.domains:
+ dsl.update(set(domain.particle_field_offsets.keys()))
+ self.particle_field_list = list(dsl)
+ self.field_list = [("ramses", f) for f in self.fluid_field_list] \
+ + self.particle_field_list
+
+ def _setup_auto_fields(self):
+ '''
+ If no fluid fields are set, try to construct the fluid field list automatically.
+ '''
+ # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
+ # Find nvar
+
+
+ # TODO: copy/pasted from DomainFile; needs refactoring!
+ num = os.path.basename(self.dataset.parameter_filename).split("."
+ )[0].split("_")[1]
+ testdomain = 1 # Just pick the first domain file to read
+ basename = "%s/%%s_%s.out%05i" % (
+ os.path.abspath(
+ os.path.dirname(self.dataset.parameter_filename)),
+ num, testdomain)
+ hydro_fn = basename % "hydro"
+ # Do we have a hydro file?
+ if not os.path.exists(hydro_fn):
+ self.fluid_field_list = []
+ return
+ # Read the number of hydro variables
+ f = open(hydro_fn, "rb")
+ hydro_header = ( ('ncpu', 1, 'i'),
+ ('nvar', 1, 'i'),
+ ('ndim', 1, 'i'),
+ ('nlevelmax', 1, 'i'),
+ ('nboundary', 1, 'i'),
+ ('gamma', 1, 'd')
+ )
+ hvals = fpu.read_attrs(f, hydro_header)
+ self.ds.gamma = hvals['gamma']
+ nvar = hvals['nvar']
+ # OK, we got NVAR, now set up the arrays depending on what NVAR is
+ # Allow some wiggle room for users to add too many variables
+ if nvar < 5:
+ mylog.debug("nvar=%s is too small! yt doesn't currently support 1D/2D runs in RAMSES.", nvar)
+ raise ValueError
+ # Basic hydro runs
+ if nvar == 5:
+ fields = ["Density",
+ "x-velocity", "y-velocity", "z-velocity",
+ "Pressure"]
+ if nvar > 5 and nvar < 11:
+ fields = ["Density",
+ "x-velocity", "y-velocity", "z-velocity",
+ "Pressure", "Metallicity"]
+ # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
+ if nvar == 11:
+ fields = ["Density",
+ "x-velocity", "y-velocity", "z-velocity",
+ "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
+ "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
+ "Pressure"]
+ if nvar > 11:
+ fields = ["Density",
+ "x-velocity", "y-velocity", "z-velocity",
+ "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
+ "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
+ "Pressure","Metallicity"]
+ while len(fields) < nvar:
+ fields.append("var"+str(len(fields)))
+ mylog.debug("No fields specified by user; automatically setting fields array to %s", str(fields))
+ self.fluid_field_list = fields
+
+ def _identify_base_chunk(self, dobj):
+ if getattr(dobj, "_chunk_info", None) is None:
+ domains = [dom for dom in self.domains if
+ dom.included(dobj.selector)]
+ base_region = getattr(dobj, "base_region", dobj)
+ if len(domains) > 1:
+ mylog.debug("Identified %s intersecting domains", len(domains))
+ subsets = [RAMSESDomainSubset(base_region, domain, self.dataset)
+ for domain in domains]
+ dobj._chunk_info = subsets
+ dobj._current_chunk = list(self._chunk_all(dobj))[0]
+
+ def _chunk_all(self, dobj):
+ oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+ yield YTDataChunk(dobj, "all", oobjs, None)
+
+ def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
+ sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+ for i,og in enumerate(sobjs):
+ if ngz > 0:
+ g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+ else:
+ g = og
+ yield YTDataChunk(dobj, "spatial", [g], None)
+
+ def _chunk_io(self, dobj, cache = True, local_only = False):
+ oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+ for subset in oobjs:
+ yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
+
+class RAMSESDataset(Dataset):
+ _index_class = RAMSESIndex
+ _field_info_class = RAMSESFieldInfo
+ gamma = 1.4 # This will get replaced on hydro_fn open
+
+ def __init__(self, filename, dataset_type='ramses',
+ fields = None, storage_filename = None,
+ units_override=None):
+ # Here we want to initiate a traceback, if the reader is not built.
+ if isinstance(fields, types.StringTypes):
+ fields = field_aliases[fields]
+ '''
+ fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
+ If set to None, will try a default set of fields
+ '''
+ self.fluid_types += ("ramses",)
+ self._fields_in_file = fields
+ Dataset.__init__(self, filename, dataset_type, units_override=units_override)
+ self.storage_filename = storage_filename
+
+ def __repr__(self):
+ return self.basename.rsplit(".", 1)[0]
+
+ def _set_code_unit_attributes(self):
+ """
+ Generates the conversion to various physical units based on the parameter file
+ """
+ # Note that unit_l *already* converts to proper!
+ # Also note that unit_l must be multiplied by the boxlen parameter to
+ # ensure we are correctly set up for the current domain.
+<<<<<<< local
+ length_unit = self.parameters['unit_l'] * self.parameters['boxlen']
+ rho_u = self.parameters['unit_d']/ self.parameters['boxlen']**3
+ # We're not multiplying by the boxlength here.
+ mass_unit = rho_u * length_unit**3
+ # Cosmological runs are done in lookback conformal time.
+ #To convert to proper time, the time unit is calculated from
+ # the expansion factor.
+=======
+ length_unit = self.parameters['unit_l']
+ boxlen = self.parameters['boxlen']
+ density_unit = self.parameters['unit_d']
+ mass_unit = density_unit * (length_unit * boxlen)**3
+>>>>>>> other
+ time_unit = self.parameters['unit_t']
+ magnetic_unit = np.sqrt(4*np.pi * mass_unit /
+ (time_unit**2 * length_unit))
+ pressure_unit = density_unit * (length_unit / time_unit)**2
+ # TODO:
+ # Generalize the temperature field to account for ionization
+ # For now assume an atomic ideal gas with cosmic abundances (x_H = 0.76)
+ mean_molecular_weight_factor = _X**-1
+
+ self.density_unit = self.quan(density_unit, 'g/cm**3')
+ self.magnetic_unit = self.quan(magnetic_unit, "gauss")
+ self.length_unit = self.quan(length_unit * boxlen, "cm")
+ self.mass_unit = self.quan(mass_unit, "g")
+ self.time_unit = self.quan(time_unit, "s")
+ self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
+ self.temperature_unit = (self.velocity_unit**2 * mp *
+ mean_molecular_weight_factor / kb)
+ self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
+
+ def _parse_parameter_file(self):
+ # hardcoded for now
+ # These should be explicitly obtained from the file, but for now that
+ # will wait until a reorganization of the source tree and better
+ # generalization.
+ self.dimensionality = 3
+ self.refine_by = 2
+ self.parameters["HydroMethod"] = 'ramses'
+ self.parameters["Time"] = 1. # default unit is 1...
+
+ self.unique_identifier = \
+ int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+ # We now execute the same logic Oliver's code does
+ rheader = {}
+ f = open(self.parameter_filename)
+ def read_rhs(cast):
+ line = f.readline()
+ p, v = line.split("=")
+ rheader[p.strip()] = cast(v)
+ for i in range(6): read_rhs(int)
+ f.readline()
+ for i in range(11): read_rhs(float)
+ f.readline()
+ read_rhs(str)
+ # This next line deserves some comment. We specify a min_level that
+ # corresponds to the minimum level in the RAMSES simulation. RAMSES is
+ # one-indexed, but it also does refer to the *oct* dimensions -- so
+ # this means that a levelmin of 1 would have *1* oct in it. So a
+ # levelmin of 2 would have 8 octs at the root mesh level.
+ self.min_level = rheader['levelmin'] - 1
+ # Now we read the hilbert indices
+ self.hilbert_indices = {}
+ if rheader['ordering type'] == "hilbert":
+ f.readline() # header
+ for n in range(rheader['ncpu']):
+ dom, mi, ma = f.readline().split()
+ self.hilbert_indices[int(dom)] = (float(mi), float(ma))
+ self.parameters.update(rheader)
+ self.current_time = self.parameters['time']
+ self.domain_left_edge = np.zeros(3, dtype='float64')
+ self.domain_dimensions = np.ones(3, dtype='int32') * \
+ 2**(self.min_level+1)
+ self.domain_right_edge = np.ones(3, dtype='float64')
+ # This is likely not true, but it's not clear how to determine the boundary conditions
+ self.periodicity = (True, True, True)
+ # These conditions seem to always be true for non-cosmological datasets
+ if rheader["time"] > 0 and rheader["H0"] == 1 and rheader["aexp"] == 1:
+ self.cosmological_simulation = 0
+ self.current_redshift = 0
+ self.hubble_constant = 0
+ self.omega_matter = 0
+ self.omega_lambda = 0
+ else:
+ self.cosmological_simulation = 1
+ self.current_redshift = (1.0 / rheader["aexp"]) - 1.0
+ self.omega_lambda = rheader["omega_l"]
+ self.omega_matter = rheader["omega_m"]
+ self.hubble_constant = rheader["H0"] / 100.0 # This is H100
+ self.max_level = rheader['levelmax'] - self.min_level - 1
+ f.close()
+
+ @classmethod
+ def _is_valid(self, *args, **kwargs):
+ if not os.path.basename(args[0]).startswith("info_"): return False
+ fn = args[0].replace("info_", "amr_").replace(".txt", ".out00001")
+ return os.path.exists(fn)
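As a worked example of the min_level convention commented in
_parse_parameter_file above, a short sketch (illustrative values only)
of the levelmin-to-domain_dimensions mapping:

# RAMSES levelmin is 1-indexed and counts oct levels: levelmin = 1 means a
# single root oct, levelmin = 2 means 8 octs (2 per axis), and so on.
for levelmin in (1, 2, 3):
    min_level = levelmin - 1               # yt's 0-indexed min_level
    octs_per_axis = 2**min_level           # 1, 2, 4
    cells_per_axis = 2**(min_level + 1)    # matches domain_dimensions above
    print levelmin, octs_per_axis**3, cells_per_axis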
diff -r ab1bd145878fe53d71c4d000422c9b7723af8c99 -r 08dd5da61eb6a1fd145a1f4046cb63269a142e40 yt/frontends/ramses/data_structures.py~
--- /dev/null
+++ b/yt/frontends/ramses/data_structures.py~
@@ -0,0 +1,563 @@
+"""
+RAMSES-specific data structures
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import numpy as np
+import stat
+import weakref
+import cStringIO
+import scipy.integrate
+
+from yt.funcs import *
+from yt.geometry.oct_geometry_handler import \
+ OctreeIndex
+from yt.geometry.geometry_handler import \
+ Index, YTDataChunk
+from yt.data_objects.static_output import \
+ Dataset
+from yt.data_objects.octree_subset import \
+ OctreeSubset
+
+from .definitions import ramses_header, field_aliases
+from yt.utilities.lib.misc_utilities import \
+ get_box_grids_level
+from yt.utilities.io_handler import \
+ io_registry
+from .fields import \
+ RAMSESFieldInfo
+import yt.utilities.fortran_utils as fpu
+from yt.geometry.oct_container import \
+ RAMSESOctreeContainer
+from yt.fields.particle_fields import \
+ standard_particle_fields
+
+class RAMSESDomainFile(object):
+ _last_mask = None
+ _last_selector_id = None
+
+ def __init__(self, ds, domain_id):
+ self.ds = ds
+ self.domain_id = domain_id
+ self.nvar = 0 # Set this later!
+ num = os.path.basename(ds.parameter_filename).split("."
+ )[0].split("_")[1]
+ basename = "%s/%%s_%s.out%05i" % (
+ os.path.abspath(
+ os.path.dirname(ds.parameter_filename)),
+ num, domain_id)
+ for t in ['grav', 'hydro', 'part', 'amr']:
+ setattr(self, "%s_fn" % t, basename % t)
+ self._read_amr_header()
+ self._read_hydro_header()
+ self._read_particle_header()
+ self._read_amr()
+
+ _hydro_offset = None
+ _level_count = None
+
+ def __repr__(self):
+ return "RAMSESDomainFile: %i" % self.domain_id
+
+ def _is_hydro(self):
+ '''
+ Does the output include hydro?
+ '''
+ return os.path.exists(self.hydro_fn)
+
+ @property
+ def level_count(self):
+ if self._level_count is not None: return self._level_count
+ self.hydro_offset
+ return self._level_count
+
+ @property
+ def hydro_offset(self):
+ if self._hydro_offset is not None: return self._hydro_offset
+ # We now have to open the file and calculate it
+ f = open(self.hydro_fn, "rb")
+ fpu.skip(f, 6)
+ # It goes: level, CPU, 8-variable
+ min_level = self.ds.min_level
+ n_levels = self.amr_header['nlevelmax'] - min_level
+ hydro_offset = np.zeros(n_levels, dtype='int64')
+ hydro_offset -= 1
+ level_count = np.zeros(n_levels, dtype='int64')
+ skipped = []
+ for level in range(self.amr_header['nlevelmax']):
+ for cpu in range(self.amr_header['nboundary'] +
+ self.amr_header['ncpu']):
+ header = ( ('file_ilevel', 1, 'I'),
+ ('file_ncache', 1, 'I') )
+ try:
+ hvals = fpu.read_attrs(f, header, "=")
+ except AssertionError:
+ print "You are running with the wrong number of fields."
+ print "If you specified these in the load command, check the array length."
+ print "In this file there are %s hydro fields." % skipped
+ #print "The last set of field sizes was: %s" % skipped
+ raise
+ if hvals['file_ncache'] == 0: continue
+ assert(hvals['file_ilevel'] == level+1)
+ if cpu + 1 == self.domain_id and level >= min_level:
+ hydro_offset[level - min_level] = f.tell()
+ level_count[level - min_level] = hvals['file_ncache']
+ skipped = fpu.skip(f, 8 * self.nvar)
+ self._hydro_offset = hydro_offset
+ self._level_count = level_count
+ return self._hydro_offset
+
+ def _read_hydro_header(self):
+ # If no hydro file is found, return
+ if not self._is_hydro():
+ return
+ if self.nvar > 0: return self.nvar
+ # Read the number of hydro variables
+ f = open(self.hydro_fn, "rb")
+ fpu.skip(f, 1)
+ self.nvar = fpu.read_vector(f, "i")[0]
+
+ def _read_particle_header(self):
+ if not os.path.exists(self.part_fn):
+ self.local_particle_count = 0
+ self.particle_field_offsets = {}
+ return
+ f = open(self.part_fn, "rb")
+ f.seek(0, os.SEEK_END)
+ flen = f.tell()
+ f.seek(0)
+ hvals = {}
+ attrs = ( ('ncpu', 1, 'I'),
+ ('ndim', 1, 'I'),
+ ('npart', 1, 'I') )
+ hvals.update(fpu.read_attrs(f, attrs))
+ fpu.read_vector(f, 'I')
+
+ attrs = ( ('nstar_tot', 1, 'I'),
+ ('mstar_tot', 1, 'd'),
+ ('mstar_lost', 1, 'd'),
+ ('nsink', 1, 'I') )
+ hvals.update(fpu.read_attrs(f, attrs))
+ self.particle_header = hvals
+ self.local_particle_count = hvals['npart']
+ particle_fields = [
+ ("particle_position_x", "d"),
+ ("particle_position_y", "d"),
+ ("particle_position_z", "d"),
+ ("particle_velocity_x", "d"),
+ ("particle_velocity_y", "d"),
+ ("particle_velocity_z", "d"),
+ ("particle_mass", "d"),
+ ("particle_identifier", "I"),
+ ("particle_refinement_level", "I")]
+ if hvals["nstar_tot"] > 0:
+ particle_fields += [("particle_age", "d"),
+ ("particle_metallicity", "d")]
+ field_offsets = {}
+ _pfields = {}
+ for field, vtype in particle_fields:
+ if f.tell() >= flen: break
+ field_offsets["io", field] = f.tell()
+ _pfields["io", field] = vtype
+ fpu.skip(f, 1)
+ self.particle_field_offsets = field_offsets
+ self.particle_field_types = _pfields
+ self.particle_types = self.particle_types_raw = ("io",)
+
+ def _read_amr_header(self):
+ hvals = {}
+ f = open(self.amr_fn, "rb")
+ for header in ramses_header(hvals):
+ hvals.update(fpu.read_attrs(f, header))
+ # That's the header, now we skip a few.
+ hvals['numbl'] = np.array(hvals['numbl']).reshape(
+ (hvals['nlevelmax'], hvals['ncpu']))
+ fpu.skip(f)
+ if hvals['nboundary'] > 0:
+ fpu.skip(f, 2)
+ self.ngridbound = fpu.read_vector(f, 'i').astype("int64")
+ else:
+ self.ngridbound = np.zeros(hvals['nlevelmax'], dtype='int64')
+ free_mem = fpu.read_attrs(f, (('free_mem', 5, 'i'), ) )
+ ordering = fpu.read_vector(f, 'c')
+ fpu.skip(f, 4)
+ # Now we're at the tree itself
+ # Now we iterate over each level and each CPU.
+ self.amr_header = hvals
+ self.amr_offset = f.tell()
+ self.local_oct_count = hvals['numbl'][self.ds.min_level:, self.domain_id - 1].sum()
+ self.total_oct_count = hvals['numbl'][self.ds.min_level:,:].sum(axis=0)
+
+ def _read_amr(self):
+ """Open the oct file, read in octs level-by-level.
+ For each oct, only the position, index, level and domain
+ are needed - its position in the octree is found automatically.
+ The most important is finding all the information to feed
+ oct_handler.add
+ """
+ self.oct_handler = RAMSESOctreeContainer(self.ds.domain_dimensions/2,
+ self.ds.domain_left_edge, self.ds.domain_right_edge)
+ root_nodes = self.amr_header['numbl'][self.ds.min_level,:].sum()
+ self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
+ fb = open(self.amr_fn, "rb")
+ fb.seek(self.amr_offset)
+ f = cStringIO.StringIO()
+ f.write(fb.read())
+ f.seek(0)
+ mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
+ self.domain_id, self.total_oct_count.sum(), self.ngridbound.sum())
+ def _ng(c, l):
+ if c < self.amr_header['ncpu']:
+ ng = self.amr_header['numbl'][l, c]
+ else:
+ ng = self.ngridbound[c - self.amr_header['ncpu'] +
+ self.amr_header['nboundary']*l]
+ return ng
+ min_level = self.ds.min_level
+ max_level = min_level
+ nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
+ for level in range(self.amr_header['nlevelmax']):
+ # Easier if do this 1-indexed
+ for cpu in range(self.amr_header['nboundary'] + self.amr_header['ncpu']):
+ #ng is the number of octs on this level on this domain
+ ng = _ng(cpu, level)
+ if ng == 0: continue
+ ind = fpu.read_vector(f, "I").astype("int64")
+ fpu.skip(f, 2)
+ pos = np.empty((ng, 3), dtype='float64')
+ pos[:,0] = fpu.read_vector(f, "d") - nx
+ pos[:,1] = fpu.read_vector(f, "d") - ny
+ pos[:,2] = fpu.read_vector(f, "d") - nz
+ #pos *= self.ds.domain_width
+ #pos += self.dataset.domain_left_edge
+ fpu.skip(f, 31)
+ #parents = fpu.read_vector(f, "I")
+ #fpu.skip(f, 6)
+ #children = np.empty((ng, 8), dtype='int64')
+ #for i in range(8):
+ # children[:,i] = fpu.read_vector(f, "I")
+ #cpu_map = np.empty((ng, 8), dtype="int64")
+ #for i in range(8):
+ # cpu_map[:,i] = fpu.read_vector(f, "I")
+ #rmap = np.empty((ng, 8), dtype="int64")
+ #for i in range(8):
+ # rmap[:,i] = fpu.read_vector(f, "I")
+ # We don't want duplicate grids.
+ # Note that we're adding *grids*, not individual cells.
+ if level >= min_level:
+ assert(pos.shape[0] == ng)
+ n = self.oct_handler.add(cpu + 1, level - min_level, pos,
+ count_boundary = 1)
+ self._error_check(cpu, level, pos, n, ng, (nx, ny, nz))
+ if n > 0: max_level = max(level - min_level, max_level)
+ self.max_level = max_level
+ self.oct_handler.finalize()
+
+ def _error_check(self, cpu, level, pos, n, ng, nn):
+ # NOTE: We have the second conditional here because internally, it will
+ # not add any octs in that case.
+ if n == ng or cpu + 1 > self.oct_handler.num_domains:
+ return
+ # This is where we now check for issues with creating the new octs, and
+ # we attempt to determine what precisely is going wrong.
+ # These are all print statements.
+ print "We have detected an error with the construction of the Octree."
+ print " The number of Octs to be added : %s" % ng
+ print " The number of Octs added : %s" % n
+ print " Level : %s" % level
+ print " CPU Number (0-indexed) : %s" % cpu
+ for i, ax in enumerate('xyz'):
+ print " extent [%s] : %s %s" % \
+ (ax, pos[:,i].min(), pos[:,i].max())
+ print " domain left : %s" % \
+ (self.ds.domain_left_edge,)
+ print " domain right : %s" % \
+ (self.ds.domain_right_edge,)
+ print " offset applied : %s %s %s" % \
+ (nn[0], nn[1], nn[2])
+ print "AMR Header:"
+ for key in sorted(self.amr_header):
+ print " %-30s: %s" % (key, self.amr_header[key])
+ raise RuntimeError
+
+ def included(self, selector):
+ if getattr(selector, "domain_id", None) is not None:
+ return selector.domain_id == self.domain_id
+ domain_ids = self.oct_handler.domain_identify(selector)
+ return self.domain_id in domain_ids
+
+class RAMSESDomainSubset(OctreeSubset):
+
+ _domain_offset = 1
+
+ def fill(self, content, fields, selector):
+ # Here we get a copy of the file, which we skip through and read the
+ # bits we want.
+ oct_handler = self.oct_handler
+ all_fields = self.domain.ds.index.fluid_field_list
+ fields = [f for ft, f in fields]
+ tr = {}
+ cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
+ levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
+ selector, self.domain_id, cell_count)
+ for field in fields:
+ tr[field] = np.zeros(cell_count, 'float64')
+ for level, offset in enumerate(self.domain.hydro_offset):
+ if offset == -1: continue
+ content.seek(offset)
+ nc = self.domain.level_count[level]
+ temp = {}
+ for field in all_fields:
+ temp[field] = np.empty((nc, 8), dtype="float64")
+ for i in range(8):
+ for field in all_fields:
+ if field not in fields:
+ fpu.skip(content)
+ else:
+ temp[field][:,i] = fpu.read_vector(content, 'd') # cell 1
+ oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, temp)
+ return tr
+
+class RAMSESIndex(OctreeIndex):
+
+ def __init__(self, ds, dataset_type='ramses'):
+ self._ds = ds # TODO: Figure out the class composition better!
+ self.fluid_field_list = ds._fields_in_file
+ self.dataset_type = dataset_type
+ self.dataset = weakref.proxy(ds)
+ self.index_filename = self.dataset.parameter_filename
+ self.directory = os.path.dirname(self.index_filename)
+ self.max_level = None
+
+ self.float_type = np.float64
+ super(RAMSESIndex, self).__init__(ds, dataset_type)
+
+ def _initialize_oct_handler(self):
+ self.domains = [RAMSESDomainFile(self.dataset, i + 1)
+ for i in range(self.dataset['ncpu'])]
+ total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
+ for dom in self.domains)
+ self.max_level = max(dom.max_level for dom in self.domains)
+ self.num_grids = total_octs
+
+ def _detect_output_fields(self):
+ # Do we want to attempt to figure out what the fields are in the file?
+ dsl = set([])
+ if self.fluid_field_list is None or len(self.fluid_field_list) <= 0:
+ self._setup_auto_fields()
+ for domain in self.domains:
+ dsl.update(set(domain.particle_field_offsets.keys()))
+ self.particle_field_list = list(dsl)
+ self.field_list = [("ramses", f) for f in self.fluid_field_list] \
+ + self.particle_field_list
+
+ def _setup_auto_fields(self):
+ '''
+ If no fluid fields are set, try to construct the fluid field list automatically.
+ '''
+ # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
+ # Find nvar
+
+
+ # TODO: copy/pasted from DomainFile; needs refactoring!
+ num = os.path.basename(self._ds.parameter_filename).split("."
+ )[0].split("_")[1]
+ testdomain = 1 # Just pick the first domain file to read
+ basename = "%s/%%s_%s.out%05i" % (
+ os.path.abspath(
+ os.path.dirname(self._ds.parameter_filename)),
+ num, testdomain)
+ hydro_fn = basename % "hydro"
+ # Do we have a hydro file?
+ if not os.path.exists(hydro_fn):
+ self.fluid_field_list = []
+ return
+ # Read the number of hydro variables
+ f = open(hydro_fn, "rb")
+ hydro_header = ( ('ncpu', 1, 'i'),
+ ('nvar', 1, 'i'),
+ ('ndim', 1, 'i'),
+ ('nlevelmax', 1, 'i'),
+ ('nboundary', 1, 'i'),
+ ('gamma', 1, 'd')
+ )
+ hvals = fpu.read_attrs(f, hydro_header)
+ self.ds.gamma = hvals['gamma']
+ nvar = hvals['nvar']
+ # OK, we got NVAR, now set up the arrays depending on what NVAR is
+ # Allow some wiggle room for users to add too many variables
+ if nvar < 5:
+ mylog.debug("nvar=%s is too small! yt doesn't currently support 1D/2D runs in RAMSES.", nvar)
+ raise ValueError
+ # Basic hydro runs
+ if nvar == 5:
+ fields = ["Density",
+ "x-velocity", "y-velocity", "z-velocity",
+ "Pressure"]
+ if nvar > 5 and nvar < 11:
+ fields = ["Density",
+ "x-velocity", "y-velocity", "z-velocity",
+ "Pressure", "Metallicity"]
+ # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
+ if nvar == 11:
+ fields = ["Density",
+ "x-velocity", "y-velocity", "z-velocity",
+ "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
+ "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
+ "Pressure"]
+ if nvar > 11:
+ fields = ["Density",
+ "x-velocity", "y-velocity", "z-velocity",
+ "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
+ "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
+ "Pressure","Metallicity"]
+ while len(fields) < nvar:
+ fields.append("var"+str(len(fields)))
+ mylog.debug("No fields specified by user; automatically setting fields array to %s", str(fields))
+ self.fluid_field_list = fields
+
+ def _identify_base_chunk(self, dobj):
+ if getattr(dobj, "_chunk_info", None) is None:
+ domains = [dom for dom in self.domains if
+ dom.included(dobj.selector)]
+ base_region = getattr(dobj, "base_region", dobj)
+ if len(domains) > 1:
+ mylog.debug("Identified %s intersecting domains", len(domains))
+ subsets = [RAMSESDomainSubset(base_region, domain, self.dataset)
+ for domain in domains]
+ dobj._chunk_info = subsets
+ dobj._current_chunk = list(self._chunk_all(dobj))[0]
+
+ def _chunk_all(self, dobj):
+ oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+ yield YTDataChunk(dobj, "all", oobjs, None)
+
+ def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
+ sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+ for i,og in enumerate(sobjs):
+ if ngz > 0:
+ g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+ else:
+ g = og
+ yield YTDataChunk(dobj, "spatial", [g], None)
+
+ def _chunk_io(self, dobj, cache = True, local_only = False):
+ oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+ for subset in oobjs:
+ yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
+
+class RAMSESDataset(Dataset):
+ _index_class = RAMSESIndex
+ _field_info_class = RAMSESFieldInfo
+ gamma = 1.4 # This will get replaced on hydro_fn open
+
+ def __init__(self, filename, dataset_type='ramses',
+ fields = None, storage_filename = None):
+ '''
+ fields: An array of hydro variable fields, in order of position in
+ the hydro_XXXXX.outYYYYY file. If set to None, a default set
+ of fields is tried.
+ '''
+ # If fields was passed as the name of a known alias set, expand it.
+ if isinstance(fields, types.StringTypes):
+ fields = field_aliases[fields]
+ self.fluid_types += ("ramses",)
+ self._fields_in_file = fields
+ Dataset.__init__(self, filename, dataset_type)
+ self.storage_filename = storage_filename
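+ # Example usage (a sketch, not part of this changeset; the filename is
+ # made up and the field names must match the on-disk order):
+ # ds = yt.load("output_00001/info_00001.txt",
+ # fields=["Density", "x-velocity", "y-velocity",
+ # "z-velocity", "Pressure"])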
+
+ def __repr__(self):
+ return self.basename.rsplit(".", 1)[0]
+
+ def _set_code_unit_attributes(self):
+ """
+ Generates the conversions to various physical units based on the parameter file.
+ """
+ # Note that unit_l *already* converts to proper!
+ # Also note that unit_l must be multiplied by the boxlen parameter to
+ # ensure we are correctly set up for the current domain.
+ length_unit = self.parameters['unit_l'] * self.parameters['boxlen']
+ rho_u = self.parameters['unit_d']/ self.parameters['boxlen']**3
+ # We're not multiplying by the boxlength here.
+ mass_unit = rho_u * self.parameters['unit_l']**3
+ # Cosmological runs are done in lookback conformal time.
+ # To convert to proper time, the time unit is calculated from
+ # the expansion factor.
+ time_unit = self.parameters['unit_t']
+ magnetic_unit = np.sqrt(4*np.pi * mass_unit /
+ (time_unit**2 * length_unit))
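+ # Dimensional check: mass/(time**2 * length) in CGS is g/(cm*s**2) =
+ # erg/cm**3, and sqrt(erg/cm**3) is Gauss, hence the "gauss" tag below.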
+ self.magnetic_unit = self.quan(magnetic_unit, "gauss")
+ self.length_unit = self.quan(length_unit, "cm")
+ self.mass_unit = self.quan(mass_unit, "g")
+ self.time_unit = self.quan(time_unit, "s")
+ self.velocity_unit = self.length_unit / self.time_unit
+
+ def _parse_parameter_file(self):
+ # hardcoded for now
+ # These should be explicitly obtained from the file, but for now that
+ # will wait until a reorganization of the source tree and better
+ # generalization.
+ self.dimensionality = 3
+ self.refine_by = 2
+ self.parameters["HydroMethod"] = 'ramses'
+ self.parameters["Time"] = 1. # default unit is 1...
+
+ self.unique_identifier = \
+ int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+ # We now execute the same logic Oliver's code does
+ rheader = {}
+ f = open(self.parameter_filename)
+ def read_rhs(cast):
+ line = f.readline()
+ p, v = line.split("=")
+ rheader[p.strip()] = cast(v)
+ for i in range(6): read_rhs(int)
+ f.readline()
+ for i in range(11): read_rhs(float)
+ f.readline()
+ read_rhs(str)
+ # This next line deserves some comment. We specify a min_level that
+ # corresponds to the minimum level in the RAMSES simulation. RAMSES is
+ # one-indexed, and its levels count *octs* rather than cells -- so a
+ # levelmin of 1 would have a single root oct, and a
+ # levelmin of 2 would have 8 octs at the root mesh level.
+ self.min_level = rheader['levelmin'] - 1
+ # Now we read the hilbert indices
+ self.hilbert_indices = {}
+ if rheader['ordering type'] == "hilbert":
+ f.readline() # header
+ for n in range(rheader['ncpu']):
+ dom, mi, ma = f.readline().split()
+ self.hilbert_indices[int(dom)] = (float(mi), float(ma))
+ self.parameters.update(rheader)
+ self.current_time = self.parameters['time']
+ self.domain_left_edge = np.zeros(3, dtype='float64')
+ self.domain_dimensions = np.ones(3, dtype='int32') * \
+ 2**(self.min_level+1)
+ self.domain_right_edge = np.ones(3, dtype='float64')
+ # This is likely not always true, but there is no obvious way to
+ # distinguish cosmological from non-cosmological runs here.
+ mylog.warning("RAMSES frontend assumes all simulations are cosmological!")
+ self.cosmological_simulation = 1
+ self.periodicity = (True, True, True)
+ self.current_redshift = (1.0 / rheader["aexp"]) - 1.0
+ self.omega_lambda = rheader["omega_l"]
+ self.omega_matter = rheader["omega_m"]
+ self.hubble_constant = rheader["H0"] / 100.0 # This is H100
+ self.max_level = rheader['levelmax'] - self.min_level
+
+ @classmethod
+ def _is_valid(self, *args, **kwargs):
+ if not os.path.basename(args[0]).startswith("info_"): return False
+ fn = args[0].replace("info_", "amr_").replace(".txt", ".out00001")
+ return os.path.exists(fn)
This diff is so big that we needed to truncate the remainder.
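A quick sketch, with made-up values, of the levelmin bookkeeping in
_parse_parameter_file above (levelmin is one-indexed and counts octs,
not cells):

    levelmin = 2                            # as read from an info file
    min_level = levelmin - 1                # yt's zero-indexed minimum level
    cells_per_edge = 2 ** (min_level + 1)   # domain_dimensions along each axis
    # levelmin=1 -> 1 root oct            -> 2 cells per edge
    # levelmin=2 -> 2 octs per edge (8)   -> 4 cells per edge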
https://bitbucket.org/yt_analysis/yt/commits/1c83632dd94f/
Changeset: 1c83632dd94f
Branch: yt
User: RicardaBeckmann
Date: 2015-06-05 23:27:58+00:00
Summary: fix bugs in unit handling for ramses
Affected #: 1 file
diff -r 08dd5da61eb6a1fd145a1f4046cb63269a142e40 -r 1c83632dd94f97b2eb05f4f41d64ff14ece34ffa yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -494,13 +494,14 @@
#Please note that for all units given in the info file, the boxlen
#still needs to be folded in, as shown below!
- length_unit = self.parameters['unit_l'] * self.parameters['boxlen']
- rho_u = self.parameters['unit_d']/ self.parameters['boxlen']**3
+ boxlen=self.parameters['boxlen']
+ length_unit = self.parameters['unit_l'] * boxlen
+ density_unit = self.parameters['unit_d']/ boxlen**3
# In the mass unit, the factors of boxlen cancel back out, so this
#is equivalent to unit_d*unit_l**3
- mass_unit = rho_u * length_unit**3
+ mass_unit = density_unit * length_unit**3
# Cosmological runs are done in lookback conformal time.
# To convert to proper time, the time unit is calculated from
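A quick check, with made-up numbers, that the boxlen factors really do
cancel in the mass unit (so it is equivalent to unit_d * unit_l**3, as
the comment in the diff says):

    unit_l, unit_d, boxlen = 3.09e24, 2.0e-29, 100.0
    length_unit = unit_l * boxlen
    density_unit = unit_d / boxlen**3
    mass_unit = density_unit * length_unit**3
    assert abs(mass_unit - unit_d * unit_l**3) <= 1e-12 * mass_unit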
https://bitbucket.org/yt_analysis/yt/commits/62ee0fcb0c7b/
Changeset: 62ee0fcb0c7b
Branch: yt
User: RicardaBeckmann
Date: 2015-06-05 23:35:17+00:00
Summary: deleted accidentally tracked files
Affected #: 10 files
diff -r 1c83632dd94f97b2eb05f4f41d64ff14ece34ffa -r 62ee0fcb0c7bc9acdc32370c18ff3967527bf33b yt/fields/particle_fields.py~
--- a/yt/fields/particle_fields.py~
+++ /dev/null
@@ -1,576 +0,0 @@
-"""
-These are common particle fields.
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.funcs import *
-from yt.units.yt_array import YTArray
-from yt.fields.derived_field import \
- ValidateParameter, \
- ValidateSpatial
-from yt.utilities.physical_constants import \
- mass_hydrogen_cgs, \
- mass_sun_cgs, \
- mh
-
-from yt.units.yt_array import \
- uconcatenate
-
-from yt.utilities.math_utils import \
- get_sph_r_component, \
- get_sph_theta_component, \
- get_sph_phi_component, \
- get_cyl_r_component, \
- get_cyl_z_component, \
- get_cyl_theta_component, \
- get_cyl_r, get_cyl_theta, \
- get_cyl_z, get_sph_r, \
- get_sph_theta, get_sph_phi, \
- periodic_dist, euclidean_dist
-
-from .vector_operations import \
- create_magnitude_field
-
-def _field_concat(fname):
- def _AllFields(field, data):
- v = []
- for ptype in data.ds.particle_types:
- data.ds._last_freq = (ptype, None)
- if ptype == "all" or \
- ptype in data.ds.known_filters:
- continue
- v.append(data[ptype, fname].copy())
- rv = uconcatenate(v, axis=0)
- return rv
- return _AllFields
-
-def _field_concat_slice(fname, axi):
- def _AllFields(field, data):
- v = []
- for ptype in data.ds.particle_types:
- data.ds._last_freq = (ptype, None)
- if ptype == "all" or \
- ptype in data.ds.known_filters:
- continue
- v.append(data[ptype, fname][:,axi])
- rv = uconcatenate(v, axis=0)
- return rv
- return _AllFields
-
-def particle_deposition_functions(ptype, coord_name, mass_name, registry):
- orig = set(registry.keys())
- ptype_dn = ptype.replace("_","\/").title()
- def particle_count(field, data):
- pos = data[ptype, coord_name]
- d = data.deposit(pos, method = "count")
- d = data.ds.arr(d, input_units = "cm**-3")
- return data.apply_units(d, field.units)
-
- registry.add_field(("deposit", "%s_count" % ptype),
- function = particle_count,
- validators = [ValidateSpatial()],
- display_name = "\\mathrm{%s Count}" % ptype_dn)
-
- def particle_mass(field, data):
- pos = data[ptype, coord_name]
- pmass = data[ptype, mass_name].in_units(field.units)
- d = data.deposit(pos, [pmass], method = "sum")
- return data.apply_units(d, field.units)
-
- registry.add_field(("deposit", "%s_mass" % ptype),
- function = particle_mass,
- validators = [ValidateSpatial()],
- display_name = "\\mathrm{%s Mass}" % ptype_dn,
- units = "g")
-
- def particle_density(field, data):
- pos = data[ptype, coord_name]
- mass = data[ptype, mass_name]
- pos.convert_to_units("code_length")
- mass.convert_to_units("code_mass")
- d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
- d = data.ds.arr(d, "code_mass")
- d /= data["index", "cell_volume"]
- return d
-
- registry.add_field(("deposit", "%s_density" % ptype),
- function = particle_density,
- validators = [ValidateSpatial()],
- display_name = "\\mathrm{%s Density}" % ptype_dn,
- units = "g/cm**3")
-
- def particle_cic(field, data):
- pos = data[ptype, coord_name]
- d = data.deposit(pos, [data[ptype, mass_name]], method = "cic")
- d = data.apply_units(d, data[ptype, mass_name].units)
- d /= data["index", "cell_volume"]
- return d
-
- registry.add_field(("deposit", "%s_cic" % ptype),
- function = particle_cic,
- validators = [ValidateSpatial()],
- display_name = "\\mathrm{%s CIC Density}" % ptype_dn,
- units = "g/cm**3")
-
- def _get_density_weighted_deposit_field(fname, units, method):
- def _deposit_field(field, data):
- """
- Create a grid field for particle quantities weighted by particle
- mass, using cloud-in-cell deposit.
- """
- pos = data[ptype, "particle_position"]
- # Get back into density
- pden = data[ptype, 'particle_mass']
- top = data.deposit(pos, [data[(ptype, fname)]*pden], method=method)
- bottom = data.deposit(pos, [pden], method=method)
- top[bottom == 0] = 0.0
- bnz = bottom.nonzero()
- top[bnz] /= bottom[bnz]
- d = data.ds.arr(top, input_units=units)
- return d
- return _deposit_field
-
- for ax in 'xyz':
- for method, name in zip(("cic", "sum"), ("cic", "nn")):
- function = _get_density_weighted_deposit_field(
- "particle_velocity_%s" % ax, "cm/s", method)
- registry.add_field(
- ("deposit", ("%s_"+name+"_velocity_%s") % (ptype, ax)),
- function=function, units="cm/s", take_log=False,
- validators=[ValidateSpatial(0)])
-
- # Now some translation functions.
-
- def particle_ones(field, data):
- v = np.ones(data[ptype, mass_name].shape, dtype="float64")
- return data.apply_units(v, field.units)
-
- registry.add_field((ptype, "particle_ones"),
- function = particle_ones,
- particle_type = True,
- units = "")
-
- def particle_mesh_ids(field, data):
- pos = data[ptype, coord_name]
- ids = np.zeros(pos.shape[0], dtype="float64") - 1
- # This is float64 in name only. It will be properly cast inside the
- # deposit operation.
- #_ids = ids.view("float64")
- data.deposit(pos, [ids], method = "mesh_id")
- return data.apply_units(ids, "")
- registry.add_field((ptype, "mesh_id"),
- function = particle_mesh_ids,
- validators = [ValidateSpatial()],
- particle_type = True)
-
- return list(set(registry.keys()).difference(orig))
-
-def particle_scalar_functions(ptype, coord_name, vel_name, registry):
-
- # Now we have to set up the various velocity and coordinate things. In the
- # future, we'll actually invert this and use the 3-component items
- # elsewhere, and stop using these.
-
- # Note that we pass in _ptype here so that it's defined inside the closure.
-
- def _get_coord_funcs(axi, _ptype):
- def _particle_velocity(field, data):
- return data[_ptype, vel_name][:,axi]
- def _particle_position(field, data):
- return data[_ptype, coord_name][:,axi]
- return _particle_velocity, _particle_position
- for axi, ax in enumerate("xyz"):
- v, p = _get_coord_funcs(axi, ptype)
- registry.add_field((ptype, "particle_velocity_%s" % ax),
- particle_type = True, function = v,
- units = "code_velocity")
- registry.add_field((ptype, "particle_position_%s" % ax),
- particle_type = True, function = p,
- units = "code_length")
-
-def particle_vector_functions(ptype, coord_names, vel_names, registry):
-
- # This will column_stack a set of scalars to create vector fields.
-
- def _get_vec_func(_ptype, names):
- def particle_vectors(field, data):
- v = [data[_ptype, name].in_units(field.units)
- for name in names]
- c = np.column_stack(v)
- return data.apply_units(c, field.units)
- return particle_vectors
- registry.add_field((ptype, "particle_position"),
- function=_get_vec_func(ptype, coord_names),
- units = "code_length",
- particle_type=True)
- registry.add_field((ptype, "particle_velocity"),
- function=_get_vec_func(ptype, vel_names),
- units = "cm / s",
- particle_type=True)
-
-def standard_particle_fields(registry, ptype,
- spos = "particle_position_%s",
- svel = "particle_velocity_%s"):
- # This function will set things up based on the scalar fields and standard
- # yt field names.
-
- def _particle_velocity_magnitude(field, data):
- """ M{|v|} """
- bulk_velocity = data.get_field_parameter("bulk_velocity")
- if bulk_velocity is None:
- bulk_velocity = np.zeros(3)
- return np.sqrt((data[ptype, svel % 'x'] - bulk_velocity[0])**2
- + (data[ptype, svel % 'y'] - bulk_velocity[1])**2
- + (data[ptype, svel % 'z'] - bulk_velocity[2])**2 )
-
- registry.add_field((ptype, "particle_velocity_magnitude"),
- function=_particle_velocity_magnitude,
- particle_type=True,
- take_log=False,
- units="cm/s")
-
- def _particle_specific_angular_momentum(field, data):
- """
- Calculate the specific angular momentum of a particle. Returns a vector for each
- particle.
- """
- if data.has_field_parameter("bulk_velocity"):
- bv = data.get_field_parameter("bulk_velocity")
- else: bv = np.zeros(3, dtype=np.float64)
- xv = data[ptype, svel % 'x'] - bv[0]
- yv = data[ptype, svel % 'y'] - bv[1]
- zv = data[ptype, svel % 'z'] - bv[2]
- center = data.get_field_parameter('center')
- coords = YTArray([data[ptype, spos % 'x'],
- data[ptype, spos % 'y'],
- data[ptype, spos % 'z']], dtype=np.float64)
- new_shape = tuple([3] + [1]*(len(coords.shape)-1))
- r_vec = coords - np.reshape(center,new_shape)
- v_vec = YTArray([xv,yv,zv], dtype=np.float64)
- return np.cross(r_vec, v_vec, axis=0)
-
- registry.add_field((ptype, "particle_specific_angular_momentum"),
- function=_particle_specific_angular_momentum,
- particle_type=True,
- units="cm**2/s",
- validators=[ValidateParameter("center")])
-
- def _particle_specific_angular_momentum_x(field, data):
- if data.has_field_parameter("bulk_velocity"):
- bv = data.get_field_parameter("bulk_velocity")
- else: bv = np.zeros(3, dtype=np.float64)
- center = data.get_field_parameter('center')
- y = data[ptype, spos % "y"] - center[1]
- z = data[ptype, spos % "z"] - center[2]
- yv = data[ptype, svel % "y"] - bv[1]
- zv = data[ptype, svel % "z"] - bv[2]
- return yv*z - zv*y
-
- registry.add_field((ptype, "particle_specific_angular_momentum_x"),
- function=_particle_specific_angular_momentum_x,
- particle_type=True,
- units="cm**2/s",
- validators=[ValidateParameter("center")])
-
- def _particle_specific_angular_momentum_y(field, data):
- if data.has_field_parameter("bulk_velocity"):
- bv = data.get_field_parameter("bulk_velocity")
- else: bv = np.zeros(3, dtype=np.float64)
- center = data.get_field_parameter('center')
- x = data[ptype, spos % "x"] - center[0]
- z = data[ptype, spos % "z"] - center[2]
- xv = data[ptype, svel % "x"] - bv[0]
- zv = data[ptype, svel % "z"] - bv[2]
- return -(xv*z - zv*x)
-
- registry.add_field((ptype, "particle_specific_angular_momentum_y"),
- function=_particle_specific_angular_momentum_y,
- particle_type=True,
- units="cm**2/s",
- validators=[ValidateParameter("center")])
-
- def _particle_specific_angular_momentum_z(field, data):
- if data.has_field_parameter("bulk_velocity"):
- bv = data.get_field_parameter("bulk_velocity")
- else: bv = np.zeros(3, dtype=np.float64)
- center = data.get_field_parameter('center')
- x = data[ptype, spos % "x"] - center[0]
- y = data[ptype, spos % "y"] - center[1]
- xv = data[ptype, svel % "x"] - bv[0]
- yv = data[ptype, svel % "y"] - bv[1]
- return xv*y - yv*x
-
- registry.add_field((ptype, "particle_specific_angular_momentum_z"),
- function=_particle_specific_angular_momentum_z,
- particle_type=True,
- units="cm**2/s",
- validators=[ValidateParameter("center")])
-
- create_magnitude_field(registry, "particle_specific_angular_momentum",
- "cm**2/s", ftype=ptype, particle_type=True)
-
- def _particle_angular_momentum_x(field, data):
- return data[ptype, "particle_mass"] * \
- data[ptype, "particle_specific_angular_momentum_x"]
- registry.add_field((ptype, "particle_angular_momentum_x"),
- function=_particle_angular_momentum_x,
- units="g*cm**2/s", particle_type=True,
- validators=[ValidateParameter('center')])
-
- def _particle_angular_momentum_y(field, data):
- return data[ptype, "particle_mass"] * \
- data[ptype, "particle_specific_angular_momentum_y"]
- registry.add_field((ptype, "particle_angular_momentum_y"),
- function=_particle_angular_momentum_y,
- units="g*cm**2/s", particle_type=True,
- validators=[ValidateParameter('center')])
-
- def _particle_angular_momentum_z(field, data):
- return data[ptype, "particle_mass"] * \
- data[ptype, "particle_specific_angular_momentum_z"]
- registry.add_field((ptype, "particle_angular_momentum_z"),
- function=_particle_angular_momentum_z,
- units="g*cm**2/s", particle_type=True,
- validators=[ValidateParameter('center')])
-
- def _particle_angular_momentum(field, data):
- return data[ptype, "particle_mass"] \
- * data[ptype, "particle_specific_angular_momentum"]
- registry.add_field((ptype, "particle_angular_momentum"),
- function=_particle_angular_momentum,
- particle_type=True,
- units="g*cm**2/s",
- validators=[ValidateParameter("center")])
-
- create_magnitude_field(registry, "particle_angular_momentum",
- "g*cm**2/s", ftype=ptype, particle_type=True)
-
- from .field_functions import \
- get_radius
-
- def _particle_radius(field, data):
- return get_radius(data, "particle_position_")
- registry.add_field((ptype, "particle_radius"),
- function=_particle_radius,
- validators=[ValidateParameter("center")],
- units="cm", particle_type = True,
- display_name = "Particle Radius")
-
- def _particle_spherical_position_radius(field, data):
- """
- Radial component of the particles' position vectors in spherical coords
- based on the provided field parameters for 'normal', 'center', and
- 'bulk_velocity'.
- """
- normal = data.get_field_parameter('normal')
- center = data.get_field_parameter('center')
- bv = data.get_field_parameter("bulk_velocity")
- pos = spos
- pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
- theta = get_sph_theta(pos, center)
- phi = get_sph_phi(pos, center)
- pos = pos - np.reshape(center, (3, 1))
- sphr = get_sph_r_component(pos, theta, phi, normal)
- return sphr
-
- registry.add_field((ptype, "particle_spherical_position_radius"),
- function=_particle_spherical_position_radius,
- particle_type=True, units="cm",
- validators=[ValidateParameter("normal"),
- ValidateParameter("center")])
-
- def _particle_spherical_position_theta(field, data):
- """
- Theta component of the particles' position vectors in spherical coords
- based on the provided field parameters for 'normal', 'center', and
- 'bulk_velocity'.
- """
- normal = data.get_field_parameter('normal')
- center = data.get_field_parameter('center')
- bv = data.get_field_parameter("bulk_velocity")
- pos = spos
- pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
- theta = get_sph_theta(pos, center)
- phi = get_sph_phi(pos, center)
- pos = pos - np.reshape(center, (3, 1))
- spht = get_sph_theta_component(pos, theta, phi, normal)
- return spht
-
- registry.add_field((ptype, "particle_spherical_position_theta"),
- function=_particle_spherical_position_theta,
- particle_type=True, units="cm",
- validators=[ValidateParameter("normal"),
- ValidateParameter("center")])
-
- def _particle_spherical_position_phi(field, data):
- """
- Phi component of the particles' position vectors in spherical coords
- based on the provided field parameters for 'normal', 'center', and
- 'bulk_velocity'.
- """
- normal = data.get_field_parameter('normal')
- center = data.get_field_parameter('center')
- bv = data.get_field_parameter("bulk_velocity")
- pos = spos
- pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
- theta = get_sph_theta(pos, center)
- phi = get_sph_phi(pos, center)
- pos = pos - np.reshape(center, (3, 1))
- sphp = get_sph_phi_component(pos, phi, normal)
- return sphp
-
- registry.add_field((ptype, "particle_spherical_position_phi"),
- function=_particle_spherical_position_phi,
- particle_type=True, units="cm",
- validators=[ValidateParameter("normal"),
- ValidateParameter("center")])
-
- def _particle_spherical_velocity_radius(field, data):
- """
- Radial component of the particles' velocity vectors in spherical coords
- based on the provided field parameters for 'normal', 'center', and
- 'bulk_velocity',
- """
- normal = data.get_field_parameter('normal')
- center = data.get_field_parameter('center')
- bv = data.get_field_parameter("bulk_velocity")
- pos = spos
- pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
- vel = svel
- vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
- theta = get_sph_theta(pos, center)
- phi = get_sph_phi(pos, center)
- pos = pos - np.reshape(center, (3, 1))
- vel = vel - np.reshape(bv, (3, 1))
- sphr = get_sph_r_component(vel, theta, phi, normal)
- return sphr
-
- registry.add_field((ptype, "particle_spherical_velocity_radius"),
- function=_particle_spherical_velocity_radius,
- particle_type=True, units="cm/s",
- validators=[ValidateParameter("normal"),
- ValidateParameter("center")])
-
- # This is simply aliased to "particle_spherical_velocity_radius"
- # for ease of use.
- registry.add_field((ptype, "particle_radial_velocity"),
- function=_particle_spherical_velocity_radius,
- particle_type=True, units="cm/s",
- validators=[ValidateParameter("normal"),
- ValidateParameter("center")])
-
- def _particle_spherical_velocity_theta(field, data):
- """
- Theta component of the particles' velocity vectors in spherical coords
- based on the provided field parameters for 'normal', 'center', and
- 'bulk_velocity',
- """
- normal = data.get_field_parameter('normal')
- center = data.get_field_parameter('center')
- bv = data.get_field_parameter("bulk_velocity")
- pos = spos
- pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
- vel = svel
- vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
- theta = get_sph_theta(pos, center)
- phi = get_sph_phi(pos, center)
- pos = pos - np.reshape(center, (3, 1))
- vel = vel - np.reshape(bv, (3, 1))
- spht = get_sph_theta_component(vel, theta, phi, normal)
- return spht
-
- registry.add_field((ptype, "particle_spherical_velocity_theta"),
- function=_particle_spherical_velocity_theta,
- particle_type=True, units="cm/s",
- validators=[ValidateParameter("normal"),
- ValidateParameter("center")])
-
- def _particle_spherical_velocity_phi(field, data):
- """
- Phi component of the particles' velocity vectors in spherical coords
- based on the provided field parameters for 'normal', 'center', and
- 'bulk_velocity',
- """
- normal = data.get_field_parameter('normal')
- center = data.get_field_parameter('center')
- bv = data.get_field_parameter("bulk_velocity")
- pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
- vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
- theta = get_sph_theta(pos, center)
- phi = get_sph_phi(pos, center)
- pos = pos - np.reshape(center, (3, 1))
- vel = vel - np.reshape(bv, (3, 1))
- sphp = get_sph_phi_component(vel, phi, normal)
- return sphp
-
- registry.add_field((ptype, "particle_spherical_velocity_phi"),
- function=_particle_spherical_velocity_phi,
- particle_type=True, units="cm/s",
- validators=[ValidateParameter("normal"),
- ValidateParameter("center")])
-
-def add_particle_average(registry, ptype, field_name,
- weight = "particle_mass",
- density = True):
- field_units = registry[ptype, field_name].units
- def _pfunc_avg(field, data):
- pos = data[ptype, "particle_position"]
- f = data[ptype, field_name]
- wf = data[ptype, weight]
- f *= wf
- v = data.deposit(pos, [f], method = "sum")
- w = data.deposit(pos, [wf], method = "sum")
- v /= w
- if density: v /= data["index", "cell_volume"]
- v[np.isnan(v)] = 0.0
- return v
- fn = ("deposit", "%s_avg_%s" % (ptype, field_name))
- registry.add_field(fn, function=_pfunc_avg,
- validators = [ValidateSpatial(0)],
- particle_type = False,
- units = field_units)
- return fn
-
-def add_volume_weighted_smoothed_field(ptype, coord_name, mass_name,
- smoothing_length_name, density_name, smoothed_field, registry,
- nneighbors = None):
- field_name = ("deposit", "%s_smoothed_%s" % (ptype, smoothed_field))
- field_units = registry[ptype, smoothed_field].units
- def _vol_weight(field, data):
- pos = data[ptype, coord_name].in_units("code_length")
- mass = data[ptype, mass_name].in_cgs()
- dens = data[ptype, density_name].in_cgs()
- quan = data[ptype, smoothed_field].in_units(field_units)
- if smoothing_length_name is None:
- hsml = np.zeros(quan.shape, dtype='float64') - 1
- hsml = data.apply_units(hsml, "code_length")
- else:
- hsml = data[ptype, smoothing_length_name].in_units("code_length")
- kwargs = {}
- if nneighbors:
- kwargs['nneighbors'] = nneighbors
- rv = data.smooth(pos, [mass, hsml, dens, quan],
- method="volume_weighted",
- create_octree = True)[0]
- rv[np.isnan(rv)] = 0.0
- # Now some quick unit conversions.
- rv = data.apply_units(rv, field_units)
- return rv
- registry.add_field(field_name, function = _vol_weight,
- validators = [ValidateSpatial(0)],
- units = field_units)
- return [field_name]
-
diff -r 1c83632dd94f97b2eb05f4f41d64ff14ece34ffa -r 62ee0fcb0c7bc9acdc32370c18ff3967527bf33b yt/frontends/ramses/data_structures.py.orig
--- a/yt/frontends/ramses/data_structures.py.orig
+++ /dev/null
@@ -1,596 +0,0 @@
-"""
-RAMSES-specific data structures
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import os
-import numpy as np
-import stat
-import weakref
-import cStringIO
-<<<<<<< local
-import scipy.integrate
-
-=======
->>>>>>> other
-from yt.funcs import *
-from yt.geometry.oct_geometry_handler import \
- OctreeIndex
-from yt.geometry.geometry_handler import \
- Index, YTDataChunk
-from yt.data_objects.static_output import \
- Dataset
-from yt.data_objects.octree_subset import \
- OctreeSubset
-
-from .definitions import ramses_header, field_aliases
-from yt.utilities.lib.misc_utilities import \
- get_box_grids_level
-from yt.utilities.io_handler import \
- io_registry
-from yt.utilities.physical_constants import mp, kb
-from .fields import \
- RAMSESFieldInfo, _X
-import yt.utilities.fortran_utils as fpu
-from yt.geometry.oct_container import \
- RAMSESOctreeContainer
-from yt.fields.particle_fields import \
- standard_particle_fields
-
-class RAMSESDomainFile(object):
- _last_mask = None
- _last_selector_id = None
-
- def __init__(self, ds, domain_id):
- self.ds = ds
- self.domain_id = domain_id
- self.nvar = 0 # Set this later!
- num = os.path.basename(ds.parameter_filename).split("."
- )[0].split("_")[1]
- basename = "%s/%%s_%s.out%05i" % (
- os.path.abspath(
- os.path.dirname(ds.parameter_filename)),
- num, domain_id)
- for t in ['grav', 'hydro', 'part', 'amr']:
- setattr(self, "%s_fn" % t, basename % t)
- self._read_amr_header()
- self._read_hydro_header()
- self._read_particle_header()
- self._read_amr()
-
- _hydro_offset = None
- _level_count = None
-
- def __repr__(self):
- return "RAMSESDomainFile: %i" % self.domain_id
-
- def _is_hydro(self):
- '''
- Does the output include hydro?
- '''
- return os.path.exists(self.hydro_fn)
-
- @property
- def level_count(self):
- if self._level_count is not None: return self._level_count
- self.hydro_offset
- return self._level_count
-
- @property
- def hydro_offset(self):
- if self._hydro_offset is not None: return self._hydro_offset
- # We now have to open the file and calculate it
- f = open(self.hydro_fn, "rb")
- fpu.skip(f, 6)
- # It goes: level, CPU, 8-variable
- min_level = self.ds.min_level
- n_levels = self.amr_header['nlevelmax'] - min_level
- hydro_offset = np.zeros(n_levels, dtype='int64')
- hydro_offset -= 1
- level_count = np.zeros(n_levels, dtype='int64')
- skipped = []
- for level in range(self.amr_header['nlevelmax']):
- for cpu in range(self.amr_header['nboundary'] +
- self.amr_header['ncpu']):
- header = ( ('file_ilevel', 1, 'I'),
- ('file_ncache', 1, 'I') )
- try:
- hvals = fpu.read_attrs(f, header, "=")
- except AssertionError:
- print "You are running with the wrong number of fields."
- print "If you specified these in the load command, check the array length."
- print "In this file there are %s hydro fields." % skipped
- #print "The last set of field sizes was: %s" % skipped
- raise
- if hvals['file_ncache'] == 0: continue
- assert(hvals['file_ilevel'] == level+1)
- if cpu + 1 == self.domain_id and level >= min_level:
- hydro_offset[level - min_level] = f.tell()
- level_count[level - min_level] = hvals['file_ncache']
- skipped = fpu.skip(f, 8 * self.nvar)
- self._hydro_offset = hydro_offset
- self._level_count = level_count
- return self._hydro_offset
-
- def _read_hydro_header(self):
- # If no hydro file is found, return
- if not self._is_hydro():
- return
- if self.nvar > 0: return self.nvar
- # Read the number of hydro variables
- f = open(self.hydro_fn, "rb")
- fpu.skip(f, 1)
- self.nvar = fpu.read_vector(f, "i")[0]
-
- def _read_particle_header(self):
- if not os.path.exists(self.part_fn):
- self.local_particle_count = 0
- self.particle_field_offsets = {}
- return
- f = open(self.part_fn, "rb")
- f.seek(0, os.SEEK_END)
- flen = f.tell()
- f.seek(0)
- hvals = {}
- attrs = ( ('ncpu', 1, 'I'),
- ('ndim', 1, 'I'),
- ('npart', 1, 'I') )
- hvals.update(fpu.read_attrs(f, attrs))
- fpu.read_vector(f, 'I')
-
- attrs = ( ('nstar_tot', 1, 'I'),
- ('mstar_tot', 1, 'd'),
- ('mstar_lost', 1, 'd'),
- ('nsink', 1, 'I') )
- hvals.update(fpu.read_attrs(f, attrs))
- self.particle_header = hvals
- self.local_particle_count = hvals['npart']
- particle_fields = [
- ("particle_position_x", "d"),
- ("particle_position_y", "d"),
- ("particle_position_z", "d"),
- ("particle_velocity_x", "d"),
- ("particle_velocity_y", "d"),
- ("particle_velocity_z", "d"),
- ("particle_mass", "d"),
- ("particle_identifier", "I"),
- ("particle_refinement_level", "I")]
- if hvals["nstar_tot"] > 0:
- particle_fields += [("particle_age", "d"),
- ("particle_metallicity", "d")]
-
- field_offsets = {}
- _pfields = {}
- for field, vtype in particle_fields:
- if f.tell() >= flen: break
- field_offsets["io", field] = f.tell()
- _pfields["io", field] = vtype
- fpu.skip(f, 1)
- self.particle_field_offsets = field_offsets
- self.particle_field_types = _pfields
- self.particle_types = self.particle_types_raw = ("io",)
-
- def _read_amr_header(self):
- hvals = {}
- f = open(self.amr_fn, "rb")
- for header in ramses_header(hvals):
- hvals.update(fpu.read_attrs(f, header))
- # That's the header, now we skip a few.
- hvals['numbl'] = np.array(hvals['numbl']).reshape(
- (hvals['nlevelmax'], hvals['ncpu']))
- fpu.skip(f)
- if hvals['nboundary'] > 0:
- fpu.skip(f, 2)
- self.ngridbound = fpu.read_vector(f, 'i').astype("int64")
- else:
- self.ngridbound = np.zeros(hvals['nlevelmax'], dtype='int64')
- free_mem = fpu.read_attrs(f, (('free_mem', 5, 'i'), ) )
- ordering = fpu.read_vector(f, 'c')
- fpu.skip(f, 4)
- # Now we're at the tree itself
- # Now we iterate over each level and each CPU.
- self.amr_header = hvals
- self.amr_offset = f.tell()
- self.local_oct_count = hvals['numbl'][self.ds.min_level:, self.domain_id - 1].sum()
- self.total_oct_count = hvals['numbl'][self.ds.min_level:,:].sum(axis=0)
-
- def _read_amr(self):
- """Open the oct file, read in octs level-by-level.
- For each oct, only the position, index, level and domain
- are needed - its position in the octree is found automatically.
- The most important is finding all the information to feed
- oct_handler.add
- """
- self.oct_handler = RAMSESOctreeContainer(self.ds.domain_dimensions/2,
- self.ds.domain_left_edge, self.ds.domain_right_edge)
- root_nodes = self.amr_header['numbl'][self.ds.min_level,:].sum()
- self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
- fb = open(self.amr_fn, "rb")
- fb.seek(self.amr_offset)
- f = cStringIO.StringIO()
- f.write(fb.read())
- f.seek(0)
- mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
- self.domain_id, self.total_oct_count.sum(), self.ngridbound.sum())
- def _ng(c, l):
- if c < self.amr_header['ncpu']:
- ng = self.amr_header['numbl'][l, c]
- else:
- ng = self.ngridbound[c - self.amr_header['ncpu'] +
- self.amr_header['nboundary']*l]
- return ng
- min_level = self.ds.min_level
- # yt max level is not the same as the RAMSES one.
- # yt max level is the maximum number of additional refinement levels
- # so for a uni grid run with no refinement, it would be 0.
- # So we initially assume that.
- max_level = 0
- nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
- for level in range(self.amr_header['nlevelmax']):
- # Easier if do this 1-indexed
- for cpu in range(self.amr_header['nboundary'] + self.amr_header['ncpu']):
- #ng is the number of octs on this level on this domain
- ng = _ng(cpu, level)
- if ng == 0: continue
- ind = fpu.read_vector(f, "I").astype("int64")
- fpu.skip(f, 2)
- pos = np.empty((ng, 3), dtype='float64')
- pos[:,0] = fpu.read_vector(f, "d") - nx
- pos[:,1] = fpu.read_vector(f, "d") - ny
- pos[:,2] = fpu.read_vector(f, "d") - nz
- #pos *= self.ds.domain_width
- #pos += self.dataset.domain_left_edge
- fpu.skip(f, 31)
- #parents = fpu.read_vector(f, "I")
- #fpu.skip(f, 6)
- #children = np.empty((ng, 8), dtype='int64')
- #for i in range(8):
- # children[:,i] = fpu.read_vector(f, "I")
- #cpu_map = np.empty((ng, 8), dtype="int64")
- #for i in range(8):
- # cpu_map[:,i] = fpu.read_vector(f, "I")
- #rmap = np.empty((ng, 8), dtype="int64")
- #for i in range(8):
- # rmap[:,i] = fpu.read_vector(f, "I")
- # We don't want duplicate grids.
- # Note that we're adding *grids*, not individual cells.
- if level >= min_level:
- assert(pos.shape[0] == ng)
- n = self.oct_handler.add(cpu + 1, level - min_level, pos,
- count_boundary = 1)
- self._error_check(cpu, level, pos, n, ng, (nx, ny, nz))
- if n > 0: max_level = max(level - min_level, max_level)
- self.max_level = max_level
- self.oct_handler.finalize()
-
- def _error_check(self, cpu, level, pos, n, ng, nn):
- # NOTE: We have the second conditional here because internally, it will
- # not add any octs in that case.
- if n == ng or cpu + 1 > self.oct_handler.num_domains:
- return
- # This is where we now check for issues with creating the new octs, and
- # we attempt to determine what precisely is going wrong.
- # These are all print statements.
- print "We have detected an error with the construction of the Octree."
- print " The number of Octs to be added : %s" % ng
- print " The number of Octs added : %s" % n
- print " Level : %s" % level
- print " CPU Number (0-indexed) : %s" % cpu
- for i, ax in enumerate('xyz'):
- print " extent [%s] : %s %s" % \
- (ax, pos[:,i].min(), pos[:,i].max())
- print " domain left : %s" % \
- (self.ds.domain_left_edge,)
- print " domain right : %s" % \
- (self.ds.domain_right_edge,)
- print " offset applied : %s %s %s" % \
- (nn[0], nn[1], nn[2])
- print "AMR Header:"
- for key in sorted(self.amr_header):
- print " %-30s: %s" % (key, self.amr_header[key])
- raise RuntimeError
-
- def included(self, selector):
- if getattr(selector, "domain_id", None) is not None:
- return selector.domain_id == self.domain_id
- domain_ids = self.oct_handler.domain_identify(selector)
- return self.domain_id in domain_ids
-
-class RAMSESDomainSubset(OctreeSubset):
-
- _domain_offset = 1
-
- def fill(self, content, fields, selector):
- # Here we get a copy of the file, which we skip through and read the
- # bits we want.
- oct_handler = self.oct_handler
- all_fields = self.domain.ds.index.fluid_field_list
- fields = [f for ft, f in fields]
- tr = {}
- cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
- levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
- selector, self.domain_id, cell_count)
- for field in fields:
- tr[field] = np.zeros(cell_count, 'float64')
- for level, offset in enumerate(self.domain.hydro_offset):
- if offset == -1: continue
- content.seek(offset)
- nc = self.domain.level_count[level]
- temp = {}
- for field in all_fields:
- temp[field] = np.empty((nc, 8), dtype="float64")
- for i in range(8):
- for field in all_fields:
- if field not in fields:
- fpu.skip(content)
- else:
- temp[field][:,i] = fpu.read_vector(content, 'd') # cell 1
- oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, temp)
- return tr
-
-class RAMSESIndex(OctreeIndex):
-
- def __init__(self, ds, dataset_type='ramses'):
- self.fluid_field_list = ds._fields_in_file
- self.dataset_type = dataset_type
- self.dataset = weakref.proxy(ds)
- self.index_filename = self.dataset.parameter_filename
- self.directory = os.path.dirname(self.index_filename)
- self.max_level = None
-
- self.float_type = np.float64
- super(RAMSESIndex, self).__init__(ds, dataset_type)
-
- def _initialize_oct_handler(self):
- self.domains = [RAMSESDomainFile(self.dataset, i + 1)
- for i in range(self.dataset['ncpu'])]
- total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
- for dom in self.domains)
- self.max_level = max(dom.max_level for dom in self.domains)
- self.num_grids = total_octs
-
- def _detect_output_fields(self):
- # Do we want to attempt to figure out what the fields are in the file?
- dsl = set([])
- if self.fluid_field_list is None or len(self.fluid_field_list) <= 0:
- self._setup_auto_fields()
- for domain in self.domains:
- dsl.update(set(domain.particle_field_offsets.keys()))
- self.particle_field_list = list(dsl)
- self.field_list = [("ramses", f) for f in self.fluid_field_list] \
- + self.particle_field_list
-
- def _setup_auto_fields(self):
- '''
- If no fluid fields are set, the code tries to set up a fluids array by hand
- '''
- # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
- # Find nvar
-
-
- # TODO: copy/pasted from DomainFile; needs refactoring!
- num = os.path.basename(self.dataset.parameter_filename).split("."
- )[0].split("_")[1]
- testdomain = 1 # Just pick the first domain file to read
- basename = "%s/%%s_%s.out%05i" % (
- os.path.abspath(
- os.path.dirname(self.dataset.parameter_filename)),
- num, testdomain)
- hydro_fn = basename % "hydro"
- # Do we have a hydro file?
- if not os.path.exists(hydro_fn):
- self.fluid_field_list = []
- return
- # Read the number of hydro variables
- f = open(hydro_fn, "rb")
- hydro_header = ( ('ncpu', 1, 'i'),
- ('nvar', 1, 'i'),
- ('ndim', 1, 'i'),
- ('nlevelmax', 1, 'i'),
- ('nboundary', 1, 'i'),
- ('gamma', 1, 'd')
- )
- hvals = fpu.read_attrs(f, hydro_header)
- self.ds.gamma = hvals['gamma']
- nvar = hvals['nvar']
- # OK, we got NVAR, now set up the arrays depending on what NVAR is
- # Allow some wiggle room for users to add too many variables
- if nvar < 5:
- mylog.debug("nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES %s")
- raise ValueError
- # Basic hydro runs
- if nvar == 5:
- fields = ["Density",
- "x-velocity", "y-velocity", "z-velocity",
- "Pressure"]
- if nvar > 5 and nvar < 11:
- fields = ["Density",
- "x-velocity", "y-velocity", "z-velocity",
- "Pressure", "Metallicity"]
- # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
- if nvar == 11:
- fields = ["Density",
- "x-velocity", "y-velocity", "z-velocity",
- "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
- "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
- "Pressure"]
- if nvar > 11:
- fields = ["Density",
- "x-velocity", "y-velocity", "z-velocity",
- "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
- "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
- "Pressure","Metallicity"]
- while len(fields) < nvar:
- fields.append("var"+str(len(fields)))
- mylog.debug("No fields specified by user; automatically setting fields array to %s", str(fields))
- self.fluid_field_list = fields
-
- def _identify_base_chunk(self, dobj):
- if getattr(dobj, "_chunk_info", None) is None:
- domains = [dom for dom in self.domains if
- dom.included(dobj.selector)]
- base_region = getattr(dobj, "base_region", dobj)
- if len(domains) > 1:
- mylog.debug("Identified %s intersecting domains", len(domains))
- subsets = [RAMSESDomainSubset(base_region, domain, self.dataset)
- for domain in domains]
- dobj._chunk_info = subsets
- dobj._current_chunk = list(self._chunk_all(dobj))[0]
-
- def _chunk_all(self, dobj):
- oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
- yield YTDataChunk(dobj, "all", oobjs, None)
-
- def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
- sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
- for i,og in enumerate(sobjs):
- if ngz > 0:
- g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
- else:
- g = og
- yield YTDataChunk(dobj, "spatial", [g], None)
-
- def _chunk_io(self, dobj, cache = True, local_only = False):
- oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
- for subset in oobjs:
- yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
-
-class RAMSESDataset(Dataset):
- _index_class = RAMSESIndex
- _field_info_class = RAMSESFieldInfo
- gamma = 1.4 # This will get replaced on hydro_fn open
-
- def __init__(self, filename, dataset_type='ramses',
- fields = None, storage_filename = None,
- units_override=None):
- # Here we want to initiate a traceback, if the reader is not built.
- if isinstance(fields, types.StringTypes):
- fields = field_aliases[fields]
- '''
- fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
- If set to None, will try a default set of fields
- '''
- self.fluid_types += ("ramses",)
- self._fields_in_file = fields
- Dataset.__init__(self, filename, dataset_type, units_override=units_override)
- self.storage_filename = storage_filename
-
- def __repr__(self):
- return self.basename.rsplit(".", 1)[0]
-
- def _set_code_unit_attributes(self):
- """
- Generates the conversion to various physical _units based on the parameter file
- """
- # Note that unit_l *already* converts to proper!
- # Also note that unit_l must be multiplied by the boxlen parameter to
- # ensure we are correctly set up for the current domain.
-<<<<<<< local
- length_unit = self.parameters['unit_l'] * self.parameters['boxlen']
- rho_u = self.parameters['unit_d']/ self.parameters['boxlen']**3
- # We're not multiplying by the boxlength here.
- mass_unit = rho_u * length_unit**3
- # Cosmological runs are done in lookback conformal time.
- #To convert to proper time, the time unit is calculated from
- # the expansion factor.
-=======
- length_unit = self.parameters['unit_l']
- boxlen = self.parameters['boxlen']
- density_unit = self.parameters['unit_d']
- mass_unit = density_unit * (length_unit * boxlen)**3
->>>>>>> other
- time_unit = self.parameters['unit_t']
- magnetic_unit = np.sqrt(4*np.pi * mass_unit /
- (time_unit**2 * length_unit))
- pressure_unit = density_unit * (length_unit / time_unit)**2
- # TODO:
- # Generalize the temperature field to account for ionization
- # For now assume an atomic ideal gas with cosmic abundances (x_H = 0.76)
- mean_molecular_weight_factor = _X**-1
-
- self.density_unit = self.quan(density_unit, 'g/cm**3')
- self.magnetic_unit = self.quan(magnetic_unit, "gauss")
- self.length_unit = self.quan(length_unit * boxlen, "cm")
- self.mass_unit = self.quan(mass_unit, "g")
- self.time_unit = self.quan(time_unit, "s")
- self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
- self.temperature_unit = (self.velocity_unit**2 * mp *
- mean_molecular_weight_factor / kb)
- self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
-
- def _parse_parameter_file(self):
- # hardcoded for now
- # These should be explicitly obtained from the file, but for now that
- # will wait until a reorganization of the source tree and better
- # generalization.
- self.dimensionality = 3
- self.refine_by = 2
- self.parameters["HydroMethod"] = 'ramses'
- self.parameters["Time"] = 1. # default unit is 1...
-
- self.unique_identifier = \
- int(os.stat(self.parameter_filename)[stat.ST_CTIME])
- # We now execute the same logic Oliver's code does
- rheader = {}
- f = open(self.parameter_filename)
- def read_rhs(cast):
- line = f.readline()
- p, v = line.split("=")
- rheader[p.strip()] = cast(v)
- for i in range(6): read_rhs(int)
- f.readline()
- for i in range(11): read_rhs(float)
- f.readline()
- read_rhs(str)
- # This next line deserves some comment. We specify a min_level that
- # corresponds to the minimum level in the RAMSES simulation. RAMSES is
- # one-indexed, but it also does refer to the *oct* dimensions -- so
- # this means that a levelmin of 1 would have *1* oct in it. So a
- # levelmin of 2 would have 8 octs at the root mesh level.
- self.min_level = rheader['levelmin'] - 1
- # Now we read the hilbert indices
- self.hilbert_indices = {}
- if rheader['ordering type'] == "hilbert":
- f.readline() # header
- for n in range(rheader['ncpu']):
- dom, mi, ma = f.readline().split()
- self.hilbert_indices[int(dom)] = (float(mi), float(ma))
- self.parameters.update(rheader)
- self.current_time = self.parameters['time']
- self.domain_left_edge = np.zeros(3, dtype='float64')
- self.domain_dimensions = np.ones(3, dtype='int32') * \
- 2**(self.min_level+1)
- self.domain_right_edge = np.ones(3, dtype='float64')
- # This is likely not true, but it's not clear how to determine the boundary conditions
- self.periodicity = (True, True, True)
- # These conditions seem to always be true for non-cosmological datasets
- if rheader["time"] > 0 and rheader["H0"] == 1 and rheader["aexp"] == 1:
- self.cosmological_simulation = 0
- self.current_redshift = 0
- self.hubble_constant = 0
- self.omega_matter = 0
- self.omega_lambda = 0
- else:
- self.cosmological_simulation = 1
- self.current_redshift = (1.0 / rheader["aexp"]) - 1.0
- self.omega_lambda = rheader["omega_l"]
- self.omega_matter = rheader["omega_m"]
- self.hubble_constant = rheader["H0"] / 100.0 # This is H100
- self.max_level = rheader['levelmax'] - self.min_level - 1
- f.close()
-
- @classmethod
- def _is_valid(self, *args, **kwargs):
- if not os.path.basename(args[0]).startswith("info_"): return False
- fn = args[0].replace("info_", "amr_").replace(".txt", ".out00001")
- return os.path.exists(fn)
diff -r 1c83632dd94f97b2eb05f4f41d64ff14ece34ffa -r 62ee0fcb0c7bc9acdc32370c18ff3967527bf33b yt/frontends/ramses/data_structures.py~
--- a/yt/frontends/ramses/data_structures.py~
+++ /dev/null
@@ -1,563 +0,0 @@
-"""
-RAMSES-specific data structures
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import os
-import numpy as np
-import stat
-import weakref
-import cStringIO
-import scipy.integrate
-
-from yt.funcs import *
-from yt.geometry.oct_geometry_handler import \
- OctreeIndex
-from yt.geometry.geometry_handler import \
- Index, YTDataChunk
-from yt.data_objects.static_output import \
- Dataset
-from yt.data_objects.octree_subset import \
- OctreeSubset
-
-from .definitions import ramses_header, field_aliases
-from yt.utilities.lib.misc_utilities import \
- get_box_grids_level
-from yt.utilities.io_handler import \
- io_registry
-from .fields import \
- RAMSESFieldInfo
-import yt.utilities.fortran_utils as fpu
-from yt.geometry.oct_container import \
- RAMSESOctreeContainer
-from yt.fields.particle_fields import \
- standard_particle_fields
-
-class RAMSESDomainFile(object):
- _last_mask = None
- _last_selector_id = None
-
- def __init__(self, ds, domain_id):
- self.ds = ds
- self.domain_id = domain_id
- self.nvar = 0 # Set this later!
- num = os.path.basename(ds.parameter_filename).split("."
- )[0].split("_")[1]
- basename = "%s/%%s_%s.out%05i" % (
- os.path.abspath(
- os.path.dirname(ds.parameter_filename)),
- num, domain_id)
- for t in ['grav', 'hydro', 'part', 'amr']:
- setattr(self, "%s_fn" % t, basename % t)
- self._read_amr_header()
- self._read_hydro_header()
- self._read_particle_header()
- self._read_amr()
-
- _hydro_offset = None
- _level_count = None
-
- def __repr__(self):
- return "RAMSESDomainFile: %i" % self.domain_id
-
- def _is_hydro(self):
- '''
- Does the output include hydro?
- '''
- return os.path.exists(self.hydro_fn)
-
- @property
- def level_count(self):
- if self._level_count is not None: return self._level_count
- self.hydro_offset
- return self._level_count
-
- @property
- def hydro_offset(self):
- if self._hydro_offset is not None: return self._hydro_offset
- # We now have to open the file and calculate it
- f = open(self.hydro_fn, "rb")
- fpu.skip(f, 6)
- # It goes: level, CPU, 8-variable
- min_level = self.ds.min_level
- n_levels = self.amr_header['nlevelmax'] - min_level
- hydro_offset = np.zeros(n_levels, dtype='int64')
- hydro_offset -= 1
- level_count = np.zeros(n_levels, dtype='int64')
- skipped = []
- for level in range(self.amr_header['nlevelmax']):
- for cpu in range(self.amr_header['nboundary'] +
- self.amr_header['ncpu']):
- header = ( ('file_ilevel', 1, 'I'),
- ('file_ncache', 1, 'I') )
- try:
- hvals = fpu.read_attrs(f, header, "=")
- except AssertionError:
- print "You are running with the wrong number of fields."
- print "If you specified these in the load command, check the array length."
- print "In this file there are %s hydro fields." % skipped
- #print "The last set of field sizes was: %s" % skipped
- raise
- if hvals['file_ncache'] == 0: continue
- assert(hvals['file_ilevel'] == level+1)
- if cpu + 1 == self.domain_id and level >= min_level:
- hydro_offset[level - min_level] = f.tell()
- level_count[level - min_level] = hvals['file_ncache']
- skipped = fpu.skip(f, 8 * self.nvar)
- self._hydro_offset = hydro_offset
- self._level_count = level_count
- return self._hydro_offset
-
- def _read_hydro_header(self):
- # If no hydro file is found, return
- if not self._is_hydro():
- return
- if self.nvar > 0: return self.nvar
- # Read the number of hydro variables
- f = open(self.hydro_fn, "rb")
- fpu.skip(f, 1)
- self.nvar = fpu.read_vector(f, "i")[0]
-
- def _read_particle_header(self):
- if not os.path.exists(self.part_fn):
- self.local_particle_count = 0
- self.particle_field_offsets = {}
- return
- f = open(self.part_fn, "rb")
- f.seek(0, os.SEEK_END)
- flen = f.tell()
- f.seek(0)
- hvals = {}
- attrs = ( ('ncpu', 1, 'I'),
- ('ndim', 1, 'I'),
- ('npart', 1, 'I') )
- hvals.update(fpu.read_attrs(f, attrs))
- fpu.read_vector(f, 'I')
-
- attrs = ( ('nstar_tot', 1, 'I'),
- ('mstar_tot', 1, 'd'),
- ('mstar_lost', 1, 'd'),
- ('nsink', 1, 'I') )
- hvals.update(fpu.read_attrs(f, attrs))
- self.particle_header = hvals
- self.local_particle_count = hvals['npart']
- particle_fields = [
- ("particle_position_x", "d"),
- ("particle_position_y", "d"),
- ("particle_position_z", "d"),
- ("particle_velocity_x", "d"),
- ("particle_velocity_y", "d"),
- ("particle_velocity_z", "d"),
- ("particle_mass", "d"),
- ("particle_identifier", "I"),
- ("particle_refinement_level", "I")]
- if hvals["nstar_tot"] > 0:
- particle_fields += [("particle_age", "d"),
- ("particle_metallicity", "d")]
- field_offsets = {}
- _pfields = {}
- for field, vtype in particle_fields:
- if f.tell() >= flen: break
- field_offsets["io", field] = f.tell()
- _pfields["io", field] = vtype
- fpu.skip(f, 1)
- self.particle_field_offsets = field_offsets
- self.particle_field_types = _pfields
- self.particle_types = self.particle_types_raw = ("io",)
-
- def _read_amr_header(self):
- hvals = {}
- f = open(self.amr_fn, "rb")
- for header in ramses_header(hvals):
- hvals.update(fpu.read_attrs(f, header))
- # That's the header, now we skip a few.
- hvals['numbl'] = np.array(hvals['numbl']).reshape(
- (hvals['nlevelmax'], hvals['ncpu']))
- fpu.skip(f)
- if hvals['nboundary'] > 0:
- fpu.skip(f, 2)
- self.ngridbound = fpu.read_vector(f, 'i').astype("int64")
- else:
- self.ngridbound = np.zeros(hvals['nlevelmax'], dtype='int64')
- free_mem = fpu.read_attrs(f, (('free_mem', 5, 'i'), ) )
- ordering = fpu.read_vector(f, 'c')
- fpu.skip(f, 4)
- # Now we're at the tree itself
- # Now we iterate over each level and each CPU.
- self.amr_header = hvals
- self.amr_offset = f.tell()
- self.local_oct_count = hvals['numbl'][self.ds.min_level:, self.domain_id - 1].sum()
- self.total_oct_count = hvals['numbl'][self.ds.min_level:,:].sum(axis=0)
-
- def _read_amr(self):
- """Open the oct file, read in octs level-by-level.
- For each oct, only the position, index, level and domain
- are needed - its position in the octree is found automatically.
- The most important is finding all the information to feed
- oct_handler.add
- """
- self.oct_handler = RAMSESOctreeContainer(self.ds.domain_dimensions/2,
- self.ds.domain_left_edge, self.ds.domain_right_edge)
- root_nodes = self.amr_header['numbl'][self.ds.min_level,:].sum()
- self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
- fb = open(self.amr_fn, "rb")
- fb.seek(self.amr_offset)
- f = cStringIO.StringIO()
- f.write(fb.read())
- f.seek(0)
- mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
- self.domain_id, self.total_oct_count.sum(), self.ngridbound.sum())
- def _ng(c, l):
- if c < self.amr_header['ncpu']:
- ng = self.amr_header['numbl'][l, c]
- else:
- ng = self.ngridbound[c - self.amr_header['ncpu'] +
- self.amr_header['nboundary']*l]
- return ng
- min_level = self.ds.min_level
- max_level = min_level
- nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
- for level in range(self.amr_header['nlevelmax']):
- # Easier if do this 1-indexed
- for cpu in range(self.amr_header['nboundary'] + self.amr_header['ncpu']):
- #ng is the number of octs on this level on this domain
- ng = _ng(cpu, level)
- if ng == 0: continue
- ind = fpu.read_vector(f, "I").astype("int64")
- fpu.skip(f, 2)
- pos = np.empty((ng, 3), dtype='float64')
- pos[:,0] = fpu.read_vector(f, "d") - nx
- pos[:,1] = fpu.read_vector(f, "d") - ny
- pos[:,2] = fpu.read_vector(f, "d") - nz
- #pos *= self.ds.domain_width
- #pos += self.dataset.domain_left_edge
- fpu.skip(f, 31)
- #parents = fpu.read_vector(f, "I")
- #fpu.skip(f, 6)
- #children = np.empty((ng, 8), dtype='int64')
- #for i in range(8):
- # children[:,i] = fpu.read_vector(f, "I")
- #cpu_map = np.empty((ng, 8), dtype="int64")
- #for i in range(8):
- # cpu_map[:,i] = fpu.read_vector(f, "I")
- #rmap = np.empty((ng, 8), dtype="int64")
- #for i in range(8):
- # rmap[:,i] = fpu.read_vector(f, "I")
- # We don't want duplicate grids.
- # Note that we're adding *grids*, not individual cells.
- if level >= min_level:
- assert(pos.shape[0] == ng)
- n = self.oct_handler.add(cpu + 1, level - min_level, pos,
- count_boundary = 1)
- self._error_check(cpu, level, pos, n, ng, (nx, ny, nz))
- if n > 0: max_level = max(level - min_level, max_level)
- self.max_level = max_level
- self.oct_handler.finalize()
-
- def _error_check(self, cpu, level, pos, n, ng, nn):
- # NOTE: We have the second conditional here because internally, it will
- # not add any octs in that case.
- if n == ng or cpu + 1 > self.oct_handler.num_domains:
- return
- # This is where we now check for issues with creating the new octs, and
- # we attempt to determine what precisely is going wrong.
- # These are all print statements.
- print "We have detected an error with the construction of the Octree."
- print " The number of Octs to be added : %s" % ng
- print " The number of Octs added : %s" % n
- print " Level : %s" % level
- print " CPU Number (0-indexed) : %s" % cpu
- for i, ax in enumerate('xyz'):
- print " extent [%s] : %s %s" % \
- (ax, pos[:,i].min(), pos[:,i].max())
- print " domain left : %s" % \
- (self.ds.domain_left_edge,)
- print " domain right : %s" % \
- (self.ds.domain_right_edge,)
- print " offset applied : %s %s %s" % \
- (nn[0], nn[1], nn[2])
- print "AMR Header:"
- for key in sorted(self.amr_header):
- print " %-30s: %s" % (key, self.amr_header[key])
- raise RuntimeError
-
- def included(self, selector):
- if getattr(selector, "domain_id", None) is not None:
- return selector.domain_id == self.domain_id
- domain_ids = self.oct_handler.domain_identify(selector)
- return self.domain_id in domain_ids
-
-class RAMSESDomainSubset(OctreeSubset):
-
- _domain_offset = 1
-
- def fill(self, content, fields, selector):
- # Here we get a copy of the file, which we skip through and read the
- # bits we want.
- oct_handler = self.oct_handler
- all_fields = self.domain.ds.index.fluid_field_list
- fields = [f for ft, f in fields]
- tr = {}
- cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
- levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
- selector, self.domain_id, cell_count)
- for field in fields:
- tr[field] = np.zeros(cell_count, 'float64')
- for level, offset in enumerate(self.domain.hydro_offset):
- if offset == -1: continue
- content.seek(offset)
- nc = self.domain.level_count[level]
- temp = {}
- for field in all_fields:
- temp[field] = np.empty((nc, 8), dtype="float64")
- for i in range(8):
- for field in all_fields:
- if field not in fields:
- fpu.skip(content)
- else:
- temp[field][:,i] = fpu.read_vector(content, 'd') # cell 1
- oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, temp)
- return tr
-
-class RAMSESIndex(OctreeIndex):
-
- def __init__(self, ds, dataset_type='ramses'):
- self._ds = ds # TODO: Figure out the class composition better!
- self.fluid_field_list = ds._fields_in_file
- self.dataset_type = dataset_type
- self.dataset = weakref.proxy(ds)
- self.index_filename = self.dataset.parameter_filename
- self.directory = os.path.dirname(self.index_filename)
- self.max_level = None
-
- self.float_type = np.float64
- super(RAMSESIndex, self).__init__(ds, dataset_type)
-
- def _initialize_oct_handler(self):
- self.domains = [RAMSESDomainFile(self.dataset, i + 1)
- for i in range(self.dataset['ncpu'])]
- total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
- for dom in self.domains)
- self.max_level = max(dom.max_level for dom in self.domains)
- self.num_grids = total_octs
-
- def _detect_output_fields(self):
- # Do we want to attempt to figure out what the fields are in the file?
- dsl = set([])
- if self.fluid_field_list is None or len(self.fluid_field_list) <= 0:
- self._setup_auto_fields()
- for domain in self.domains:
- dsl.update(set(domain.particle_field_offsets.keys()))
- self.particle_field_list = list(dsl)
- self.field_list = [("ramses", f) for f in self.fluid_field_list] \
- + self.particle_field_list
-
- def _setup_auto_fields(self):
- '''
- If no fluid fields are set, the code tries to set up a fluids array by hand
- '''
- # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
- # Find nvar
-
-
- # TODO: copy/pasted from DomainFile; needs refactoring!
- num = os.path.basename(self._ds.parameter_filename).split("."
- )[0].split("_")[1]
- testdomain = 1 # Just pick the first domain file to read
- basename = "%s/%%s_%s.out%05i" % (
- os.path.abspath(
- os.path.dirname(self._ds.parameter_filename)),
- num, testdomain)
- hydro_fn = basename % "hydro"
- # Do we have a hydro file?
- if not os.path.exists(hydro_fn):
- self.fluid_field_list = []
- return
- # Read the number of hydro variables
- f = open(hydro_fn, "rb")
- hydro_header = ( ('ncpu', 1, 'i'),
- ('nvar', 1, 'i'),
- ('ndim', 1, 'i'),
- ('nlevelmax', 1, 'i'),
- ('nboundary', 1, 'i'),
- ('gamma', 1, 'd')
- )
- hvals = fpu.read_attrs(f, hydro_header)
- self.ds.gamma = hvals['gamma']
- nvar = hvals['nvar']
- # OK, we got NVAR, now set up the arrays depending on what NVAR is
- # Allow some wiggle room for users to add too many variables
- if nvar < 5:
- mylog.debug("nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES %s")
- raise ValueError
- # Basic hydro runs
- if nvar == 5:
- fields = ["Density",
- "x-velocity", "y-velocity", "z-velocity",
- "Pressure"]
- if nvar > 5 and nvar < 11:
- fields = ["Density",
- "x-velocity", "y-velocity", "z-velocity",
- "Pressure", "Metallicity"]
- # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
- if nvar == 11:
- fields = ["Density",
- "x-velocity", "y-velocity", "z-velocity",
- "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
- "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
- "Pressure"]
- if nvar > 11:
- fields = ["Density",
- "x-velocity", "y-velocity", "z-velocity",
- "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
- "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
- "Pressure","Metallicity"]
- while len(fields) < nvar:
- fields.append("var"+str(len(fields)))
- mylog.debug("No fields specified by user; automatically setting fields array to %s", str(fields))
- self.fluid_field_list = fields
-
- def _identify_base_chunk(self, dobj):
- if getattr(dobj, "_chunk_info", None) is None:
- domains = [dom for dom in self.domains if
- dom.included(dobj.selector)]
- base_region = getattr(dobj, "base_region", dobj)
- if len(domains) > 1:
- mylog.debug("Identified %s intersecting domains", len(domains))
- subsets = [RAMSESDomainSubset(base_region, domain, self.dataset)
- for domain in domains]
- dobj._chunk_info = subsets
- dobj._current_chunk = list(self._chunk_all(dobj))[0]
-
- def _chunk_all(self, dobj):
- oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
- yield YTDataChunk(dobj, "all", oobjs, None)
-
- def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
- sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
- for i,og in enumerate(sobjs):
- if ngz > 0:
- g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
- else:
- g = og
- yield YTDataChunk(dobj, "spatial", [g], None)
-
- def _chunk_io(self, dobj, cache = True, local_only = False):
- oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
- for subset in oobjs:
- yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
-
-class RAMSESDataset(Dataset):
- _index_class = RAMSESIndex
- _field_info_class = RAMSESFieldInfo
- gamma = 1.4 # This will get replaced on hydro_fn open
-
- def __init__(self, filename, dataset_type='ramses',
- fields = None, storage_filename = None):
- # Here we want to initiate a traceback, if the reader is not built.
- if isinstance(fields, types.StringTypes):
- fields = field_aliases[fields]
- '''
- fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
- If set to None, will try a default set of fields
- '''
- self.fluid_types += ("ramses",)
- self._fields_in_file = fields
- Dataset.__init__(self, filename, dataset_type)
- self.storage_filename = storage_filename
-
- def __repr__(self):
- return self.basename.rsplit(".", 1)[0]
-
- def _set_code_unit_attributes(self):
- """
- Generates the conversion to various physical _units based on the parameter file
- """
- # Note that unit_l *already* converts to proper!
- # Also note that unit_l must be multiplied by the boxlen parameter to
- # ensure we are correctly set up for the current domain.
- length_unit = self.parameters['unit_l'] * self.parameters['boxlen']
- rho_u = self.parameters['unit_d']/ self.parameters['boxlen']**3
- # We're not multiplying by the boxlength here.
- mass_unit = rho_u * self.parameters['unit_l']**3
- # Cosmological runs are done in lookback conformal time.
- #To convert to proper time, the time unit is calculated from
- # the expansion factor.
- time_unit = self.parameters['unit_t']
- magnetic_unit = np.sqrt(4*np.pi * mass_unit /
- (time_unit**2 * length_unit))
- self.magnetic_unit = self.quan(magnetic_unit, "gauss")
- self.length_unit = self.quan(length_unit, "cm")
- self.mass_unit = self.quan(mass_unit, "g")
- self.time_unit = self.quan(time_unit, "s")
- self.velocity_unit = self.length_unit / self.time_unit
-
- def _parse_parameter_file(self):
- # hardcoded for now
- # These should be explicitly obtained from the file, but for now that
- # will wait until a reorganization of the source tree and better
- # generalization.
- self.dimensionality = 3
- self.refine_by = 2
- self.parameters["HydroMethod"] = 'ramses'
- self.parameters["Time"] = 1. # default unit is 1...
-
- self.unique_identifier = \
- int(os.stat(self.parameter_filename)[stat.ST_CTIME])
- # We now execute the same logic Oliver's code does
- rheader = {}
- f = open(self.parameter_filename)
- def read_rhs(cast):
- line = f.readline()
- p, v = line.split("=")
- rheader[p.strip()] = cast(v)
- for i in range(6): read_rhs(int)
- f.readline()
- for i in range(11): read_rhs(float)
- f.readline()
- read_rhs(str)
- # This next line deserves some comment. We specify a min_level that
- # corresponds to the minimum level in the RAMSES simulation. RAMSES is
- # one-indexed, but it also does refer to the *oct* dimensions -- so
- # this means that a levelmin of 1 would have *1* oct in it. So a
- # levelmin of 2 would have 8 octs at the root mesh level.
- self.min_level = rheader['levelmin'] - 1
- # Now we read the hilbert indices
- self.hilbert_indices = {}
- if rheader['ordering type'] == "hilbert":
- f.readline() # header
- for n in range(rheader['ncpu']):
- dom, mi, ma = f.readline().split()
- self.hilbert_indices[int(dom)] = (float(mi), float(ma))
- self.parameters.update(rheader)
- self.current_time = self.parameters['time']
- self.domain_left_edge = np.zeros(3, dtype='float64')
- self.domain_dimensions = np.ones(3, dtype='int32') * \
- 2**(self.min_level+1)
- self.domain_right_edge = np.ones(3, dtype='float64')
- # This is likely not true, but I am not sure how to otherwise
- # distinguish them.
- mylog.warning("RAMSES frontend assumes all simulations are cosmological!")
- self.cosmological_simulation = 1
- self.periodicity = (True, True, True)
- self.current_redshift = (1.0 / rheader["aexp"]) - 1.0
- self.omega_lambda = rheader["omega_l"]
- self.omega_matter = rheader["omega_m"]
- self.hubble_constant = rheader["H0"] / 100.0 # This is H100
- self.max_level = rheader['levelmax'] - self.min_level
-
- @classmethod
- def _is_valid(self, *args, **kwargs):
- if not os.path.basename(args[0]).startswith("info_"): return False
- fn = args[0].replace("info_", "amr_").replace(".txt", ".out00001")
- return os.path.exists(fn)
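
The ``_is_valid`` check above is how yt recognizes a RAMSES output: you point ``yt.load`` at the ``info_XXXXX.txt`` file, and the frontend verifies that the matching ``amr_XXXXX.out00001`` file sits next to it. A minimal usage sketch (the snapshot directory and number are hypothetical):

    import yt

    # The matching amr_00080.out00001 must live in the same directory,
    # or _is_valid will reject the file.
    ds = yt.load("output_00080/info_00080.txt")
    print(ds.current_redshift)
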
diff -r 1c83632dd94f97b2eb05f4f41d64ff14ece34ffa -r 62ee0fcb0c7bc9acdc32370c18ff3967527bf33b yt/geometry/#oct_geometry_handler.py#
--- a/yt/geometry/#oct_geometry_handler.py#
+++ /dev/null
@@ -1,52 +0,0 @@
-"""
-Octree geometry handler
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import numpy as np
-import string, re, gc, time, cPickle
-import weakref
-
-from itertools import chain, izip
-
-from yt.funcs import *
-from yt.utilities.logger import ytLogger as mylog
-from yt.arraytypes import blankRecordArray
-from yt.config import ytcfg
-from yt.fields.field_info_container import NullFunc
-from yt.geometry.geometry_handler import Index, YTDataChunk
-from yt.utilities.definitions import MAXLEVEL
-from yt.utilities.io_handler import io_registry
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
- ParallelAnalysisInterface
-
-from yt.data_objects.data_containers import data_object_registry
-
-class OctreeIndex(Index):
- """The Index subclass for oct AMR datasets"""
- def _setup_geometry(self):
- mylog.debug("Initializing Octree Geometry Handler.")
- self._initialize_oct_handler()
-
- def get_smallest_dx(self):
- """
- Returns (in code units) the smallest cell size in the simulation.
- """
- return (self.dataset.domain_width /
- (self.dataset.domain_dimensions * 2**(self.max_level))).min()
->>>>>>> other
-
- def convert(self, unit):
- return self.dataset.conversion_factors[unit]
diff -r 1c83632dd94f97b2eb05f4f41d64ff14ece34ffa -r 62ee0fcb0c7bc9acdc32370c18ff3967527bf33b yt/geometry/oct_geometry_handler.py.orig
--- a/yt/geometry/oct_geometry_handler.py.orig
+++ /dev/null
@@ -1,55 +0,0 @@
-"""
-Octree geometry handler
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import numpy as np
-import string, re, gc, time, cPickle
-import weakref
-
-from itertools import chain, izip
-
-from yt.funcs import *
-from yt.utilities.logger import ytLogger as mylog
-from yt.arraytypes import blankRecordArray
-from yt.config import ytcfg
-from yt.fields.field_info_container import NullFunc
-from yt.geometry.geometry_handler import Index, YTDataChunk
-from yt.utilities.definitions import MAXLEVEL
-from yt.utilities.io_handler import io_registry
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
- ParallelAnalysisInterface
-
-from yt.data_objects.data_containers import data_object_registry
-
-class OctreeIndex(Index):
- """The Index subclass for oct AMR datasets"""
- def _setup_geometry(self):
- mylog.debug("Initializing Octree Geometry Handler.")
- self._initialize_oct_handler()
-
- def get_smallest_dx(self):
- """
- Returns (in code units) the smallest cell size in the simulation.
- """
- return (self.dataset.domain_width /
-<<<<<<< local
- (2**(self.dataset.max_level+1))).min()
-=======
- (self.dataset.domain_dimensions * 2**(self.max_level))).min()
->>>>>>> other
-
- def convert(self, unit):
- return self.dataset.conversion_factors[unit]
diff -r 1c83632dd94f97b2eb05f4f41d64ff14ece34ffa -r 62ee0fcb0c7bc9acdc32370c18ff3967527bf33b yt/geometry/oct_geometry_handler.py~
--- a/yt/geometry/oct_geometry_handler.py~
+++ /dev/null
@@ -1,51 +0,0 @@
-"""
-Octree geometry handler
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import numpy as np
-import string, re, gc, time, cPickle
-import weakref
-
-from itertools import chain, izip
-
-from yt.funcs import *
-from yt.utilities.logger import ytLogger as mylog
-from yt.arraytypes import blankRecordArray
-from yt.config import ytcfg
-from yt.fields.field_info_container import NullFunc
-from yt.geometry.geometry_handler import Index, YTDataChunk
-from yt.utilities.definitions import MAXLEVEL
-from yt.utilities.io_handler import io_registry
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
- ParallelAnalysisInterface
-
-from yt.data_objects.data_containers import data_object_registry
-
-class OctreeIndex(Index):
- """The Index subclass for oct AMR datasets"""
- def _setup_geometry(self):
- mylog.debug("Initializing Octree Geometry Handler.")
- self._initialize_oct_handler()
-
- def get_smallest_dx(self):
- """
- Returns (in code units) the smallest cell size in the simulation.
- """
- return (self.dataset.domain_width /
- (2**(self.max_level+1))).min()
-
- def convert(self, unit):
- return self.dataset.conversion_factors[unit]
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/yt_analysis/yt/commits/ae8049bb94b6/
Changeset: ae8049bb94b6
Branch: yt
User: RicardaBeckmann
Date: 2015-06-05 23:36:51+00:00
Summary: updated hgignore again
Affected #: 1 file
diff -r 62ee0fcb0c7bc9acdc32370c18ff3967527bf33b -r ae8049bb94b68b6e11eefc3ad183fdfe2335ba07 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -66,3 +66,4 @@
*~
*#
#*
+*.orig
https://bitbucket.org/yt_analysis/yt/commits/abe8e5733051/
Changeset: abe8e5733051
Branch: yt
User: RicardaBeckmann
Date: 2015-06-06 13:14:16+00:00
Summary: deleted accidental .c files, updated hgignore
Affected #: 3 files
diff -r ae8049bb94b68b6e11eefc3ad183fdfe2335ba07 -r abe8e5733051854223390cd462f23f3f3f4821f6 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -11,6 +11,7 @@
yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
yt/analysis_modules/ppv_cube/ppv_utils.c
yt/frontends/ramses/_ramses_reader.cpp
+yt/frontends/sph/smoothing_kernel.c
yt/geometry/fake_octree.c
yt/geometry/grid_container.c
yt/geometry/grid_visitors.c
@@ -40,6 +41,7 @@
yt/utilities/lib/mesh_utilities.c
yt/utilities/lib/misc_utilities.c
yt/utilities/lib/Octree.c
+yt/utilities/lib/GridTree.c
yt/utilities/lib/origami.c
yt/utilities/lib/pixelization_routines.c
yt/utilities/lib/png_writer.c
@@ -66,4 +68,3 @@
*~
*#
#*
-*.orig
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/yt_analysis/yt/commits/33761abb4c1f/
Changeset: 33761abb4c1f
Branch: yt
User: RicardaBeckmann
Date: 2015-06-07 10:21:01+00:00
Summary: updated hgignore again
Affected #: 1 file
diff -r abe8e5733051854223390cd462f23f3f3f4821f6 -r 33761abb4c1f871e8fdd3b2339352b09ac7de329 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -65,6 +65,4 @@
doc/_temp/*
doc/source/bootcamp/.ipynb_checkpoints/
dist
-*~
-*#
-#*
+
https://bitbucket.org/yt_analysis/yt/commits/0b129f7d5fcf/
Changeset: 0b129f7d5fcf
Branch: yt
User: RicardaBeckmann
Date: 2015-06-07 10:26:19+00:00
Summary: .hgignore edited online with Bitbucket
Affected #: 1 file
diff -r 33761abb4c1f871e8fdd3b2339352b09ac7de329 -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -65,4 +65,3 @@
doc/_temp/*
doc/source/bootcamp/.ipynb_checkpoints/
dist
-
https://bitbucket.org/yt_analysis/yt/commits/bf0e6d76f16a/
Changeset: bf0e6d76f16a
Branch: yt
User: RicardaBeckmann
Date: 2015-07-14 09:30:46+00:00
Summary: Merged yt_analysis/yt into yt
Affected #: 66 files
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a doc/README
--- a/doc/README
+++ b/doc/README
@@ -7,4 +7,4 @@
Because the documentation requires a number of dependencies, we provide
pre-built versions online, accessible here:
-http://yt-project.org/docs/dev-3.0/
+http://yt-project.org/docs/dev/
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a doc/helper_scripts/run_recipes.py
--- a/doc/helper_scripts/run_recipes.py
+++ b/doc/helper_scripts/run_recipes.py
@@ -13,7 +13,7 @@
from yt.config import ytcfg
FPATTERNS = ['*.png', '*.txt', '*.h5', '*.dat']
-DPATTERNS = ['LC*', 'LR', 'DD0046', 'halo_analysis']
+DPATTERNS = ['LC*', 'LR', 'DD0046']
BADF = ['cloudy_emissivity.h5', 'apec_emissivity.h5',
'xray_emissivity.h5', 'AMRGridData_Slice_x_density.png']
CWD = os.getcwd()
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -10,6 +10,10 @@
simulated X-ray photon lists of events from datasets that yt is able
to read. The simulated events then can be exported to X-ray telescope
simulators to produce realistic observations or can be analyzed in-line.
+
+For detailed information about the design of the algorithm in yt, check
+out `the SciPy 2014 Proceedings. <http://conference.scipy.org/proceedings/scipy2014/zuhone.html>`_.
+
The algorithm is based off of that implemented in
`PHOX <http://www.mpa-garching.mpg.de/~kdolag/Phox/>`_ for SPH datasets
by Veronica Biffi and Klaus Dolag. There are two relevant papers:
@@ -139,6 +143,12 @@
the optional keyword ``thermal_broad`` is set to ``True``, the spectral
lines will be thermally broadened.
+.. note::
+
+ ``SpectralModel`` objects based on XSPEC models (both the thermal
+ emission and Galactic absorption models mentioned below) only work
+ in Python 2.7, since currently PyXspec only works with Python 2.x.
+
Now that we have our ``SpectralModel`` that gives us a spectrum, we need
to connect this model to a ``PhotonModel`` that will connect the field
data in the ``data_source`` to the spectral model to actually generate
@@ -148,7 +158,8 @@
.. code:: python
thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3,
- photons_per_chunk=100000000)
+ photons_per_chunk=100000000,
+ method="invert_cdf")
Where we pass in the ``SpectralModel``, and can optionally set values for
the hydrogen mass fraction ``X_H`` and metallicity ``Z_met``. If
@@ -165,6 +176,18 @@
this parameter needs to be set higher, or if you are looking to decrease memory
usage, you might set this parameter lower.
+The ``method`` keyword argument is also optional, and determines how the individual
+photon energies are generated from the spectrum. It may be set to one of two values:
+
+* ``method="invert_cdf"``: Construct the cumulative distribution function of the spectrum and invert
+ it, using uniformly drawn random numbers to determine the photon energies (fast, but relies
+ on construction of the CDF and interpolation between the points, so for some spectra it
+ may not be accurate enough).
+* ``method="accept_reject"``: Generate the photon energies from the spectrum using an acceptance-rejection
+ technique (accurate, but likely to be slow).
+
+``method="invert_cdf"`` (the default) should be sufficient for most cases.
+
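As a concrete illustration of the two sampling strategies, here is a minimal NumPy sketch (the tabulated spectrum is made up for illustration; this is not the photon_simulator implementation itself):

    import numpy as np

    # Hypothetical tabulated spectrum: bin edges in keV and relative flux.
    ebins = np.linspace(0.1, 10.0, 1001)
    emid = 0.5 * (ebins[1:] + ebins[:-1])
    spec = emid ** -1.5
    n_photons = 10000

    # method="invert_cdf": build the CDF over the bins and invert it
    # with uniformly drawn random numbers.
    cdf = np.insert(np.cumsum(spec), 0, 0.0)
    cdf /= cdf[-1]
    energies_cdf = np.interp(np.random.random(n_photons), cdf, ebins)

    # method="accept_reject": draw (E, y) pairs and keep those that fall
    # under the spectrum (accurate, but can be slow for peaked spectra).
    energies_ar = []
    fmax = spec.max()
    while len(energies_ar) < n_photons:
        e = np.random.uniform(ebins[0], ebins[-1])
        if np.random.uniform(0.0, fmax) < np.interp(e, emid, spec):
            energies_ar.append(e)
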
Next, we need to specify "fiducial" values for the telescope collecting
area, exposure time, and cosmological redshift. Remember, the initial
photon generation will act as a source for Monte-Carlo sampling for more
@@ -191,12 +214,29 @@
By default, the angular diameter distance to the object is determined
from the ``cosmology`` and the cosmological ``redshift``. If a
``Cosmology`` instance is not provided, one will be made from the
-default cosmological parameters. If your source is local to the galaxy,
-you can set its distance directly, using a tuple, e.g.
-``dist=(30, "kpc")``. In this case, the ``redshift`` and ``cosmology``
-will be ignored. Finally, if the photon generating function accepts any
-parameters, they can be passed to ``from_scratch`` via a ``parameters``
-dictionary.
+default cosmological parameters. The ``center`` keyword argument specifies
+the center of the photon distribution, and the photon positions will be
+rescaled with this value as the origin. This argument accepts the following
+values:
+
+* A NumPy array or list corresponding to the coordinates of the center in
+ units of code length.
+* A ``YTArray`` corresponding to the coordinates of the center in some
+ length units.
+* ``"center"`` or ``"c"`` corresponds to the domain center.
+* ``"max"`` or ``"m"`` corresponds to the location of the maximum gas density.
+* A two-element tuple specifying the max or min of a specific field, e.g.,
+ ``("min","gravitational_potential")``, ``("max","dark_matter_density")``
+
+If ``center`` is not specified, ``from_scratch`` will attempt to use the
+``"center"`` field parameter of the ``data_source``.
+
+``from_scratch`` takes a few other optional keyword arguments. If your
+source is local to the galaxy, you can set its distance directly, using
+a tuple, e.g. ``dist=(30, "kpc")``. In this case, the ``redshift`` and
+``cosmology`` will be ignored. Finally, if the photon generating
+function accepts any parameters, they can be passed to ``from_scratch``
+via a ``parameters`` dictionary.
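
Putting the pieces of this section together, a hedged sketch of the call might look like the following (the ``PhotonList`` import path and the argument order are assumptions based on the surrounding text, not a verbatim recipe):

    from yt.analysis_modules.photon_simulator.api import PhotonList

    sp = ds.sphere("c", (250., "kpc"))  # ds is the dataset loaded earlier
    A = 3000.          # fiducial collecting area in cm**2
    exp_time = 4.0e5   # fiducial exposure time in s
    redshift = 0.05    # ignored below, because dist is given

    photons = PhotonList.from_scratch(sp, redshift, A, exp_time,
                                      thermal_model, center="c",
                                      dist=(30, "kpc"))
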
At this point, the ``photons`` are distributed in the three-dimensional
space of the ``data_source``, with energies in the rest frame of the
@@ -265,7 +305,7 @@
abs_model = TableAbsorbModel("tbabs_table.h5", 0.1)
Now we're ready to project the photons. First, we choose a line-of-sight
-vector ``L``. Second, we'll adjust the exposure time and the redshift.
+vector ``normal``. Second, we'll adjust the exposure time and the redshift.
Third, we'll pass in the absorption ``SpectrumModel``. Fourth, we'll
specify a ``sky_center`` in RA,DEC on the sky in degrees.
@@ -274,26 +314,40 @@
course far short of a full simulation of a telescope ray-trace, but it's
a quick-and-dirty way to get something close to the real thing. We'll
discuss how to get your simulated events into a format suitable for
-reading by telescope simulation codes later.
+reading by telescope simulation codes later. If you just want to convolve
+the photons with an ARF, you may specify that as the only response, but some
+ARFs are unnormalized and still require the RMF for normalization. Check with
+the documentation associated with these files for details. If we are using the
+RMF to convolve energies, we must set ``convolve_energies=True``.
.. code:: python
ARF = "chandra_ACIS-S3_onaxis_arf.fits"
RMF = "chandra_ACIS-S3_onaxis_rmf.fits"
- L = [0.0,0.0,1.0]
- events = photons.project_photons(L, exp_time_new=2.0e5, redshift_new=0.07, absorb_model=abs_model,
- sky_center=(187.5,12.333), responses=[ARF,RMF])
+ normal = [0.0,0.0,1.0]
+ events = photons.project_photons(normal, exp_time_new=2.0e5, redshift_new=0.07, dist_new=None,
+ absorb_model=abs_model, sky_center=(187.5,12.333), responses=[ARF,RMF],
+ convolve_energies=True, no_shifting=False, north_vector=None,
+ psf_sigma=None)
-Also, the optional keyword ``psf_sigma`` specifies a Gaussian standard
-deviation to scatter the photon sky positions around with, providing a
-crude representation of a PSF.
+In this case, we chose a three-vector ``normal`` to specify an arbitrary
+line-of-sight, but ``"x"``, ``"y"``, or ``"z"`` could also be chosen to
+project along one of those axes.
-.. warning::
+``project_photons`` takes several other optional keyword arguments.
- The binned images that result, even if you convolve with responses,
- are still of the same resolution as the finest cell size of the
- simulation dataset. If you want a more accurate simulation of a
- particular X-ray telescope, you should check out `Storing events for future use and for reading-in by telescope simulators`_.
+* ``no_shifting`` (default ``False``) controls whether or not Doppler
+ shifting of photon energies is turned on.
+* ``dist_new`` is a (value, unit) tuple that is used to set a new
+ angular diameter distance by hand instead of having it determined
+ by the cosmology and the value of the redshift. Should only be used
+ for simulations of nearby objects.
+* For off-axis ``normal`` vectors, the ``north_vector`` argument can
+ be used to control what vector corresponds to the "up" direction in
+ the resulting event list.
+* ``psf_sigma`` may be specified to provide a crude representation of
+  a PSF, and corresponds to the standard deviation (in degrees) of a
+ Gaussian PSF model.
Let's just take a quick look at the raw events object:
@@ -343,19 +397,27 @@
Which is starting to look like a real observation!
+.. warning::
+
+ The binned images that result, even if you convolve with responses,
+ are still of the same resolution as the finest cell size of the
+ simulation dataset. If you want a more accurate simulation of a
+ particular X-ray telescope, you should check out `Storing events for future use and for reading-in by telescope simulators`_.
+
We can also bin up the spectrum into energy bins, and write it to a FITS
table file. This is an example where we've binned up the spectrum
according to the unconvolved photon energy:
.. code:: python
- events.write_spectrum("virgo_spec.fits", energy_bins=True, emin=0.1, emax=10.0, nchan=2000, clobber=True)
+ events.write_spectrum("virgo_spec.fits", bin_type="energy", emin=0.1, emax=10.0, nchan=2000, clobber=True)
-If we don't set ``energy_bins=True``, and we have convolved our events
+We can also set ``bin_type="channel"``. If we have convolved our events
with response files, then any other keywords will be ignored and it will
try to make a spectrum from the channel information that is contained
-within the RMF, suitable for analyzing in XSPEC. For now, we'll stick
-with the energy spectrum, and plot it up:
+within the RMF. Otherwise, the channels will be determined from the ``emin``,
+``emax``, and ``nchan`` keywords, and will be numbered from 1 to ``nchan``.
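
A channel-binned call would then look something like this (a sketch; the output filename is arbitrary):

    # With convolved responses, the channel structure comes from the RMF,
    # and the emin/emax/nchan keywords are ignored.
    events.write_spectrum("virgo_chan_spec.fits", bin_type="channel", clobber=True)
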
+For now, we'll stick with the energy spectrum, and plot it up:
.. code:: python
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -271,6 +271,29 @@
For a practical application of this, see :ref:`cookbook-radial-velocity`.
+Gradient Fields
+---------------
+
+yt provides a way to compute gradients of spatial fields using the
+:meth:`~yt.frontends.flash.data_structures.FLASHDataset.add_gradient_fields`
+method. If you have a spatially-based field such as density or temperature,
+and want to calculate the gradient of that field, you can do it like so:
+
+.. code-block:: python
+
+ ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
+ grad_fields = ds.add_gradient_fields(("gas","temperature"))
+
+where the ``grad_fields`` list will now have a list of new field names that can be used
+in calculations, representing the 3 different components of the field and the magnitude
+of the gradient, e.g., ``"temperature_gradient_x"``, ``"temperature_gradient_y"``,
+``"temperature_gradient_z"``, and ``"temperature_gradient_magnitude"``. To see an example
+of how to create and use these fields, see :ref:`cookbook-complicated-derived-fields`.
+
+.. note::
+
+ ``add_gradient_fields`` currently only supports Cartesian geometries!
+
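As a short follow-on to the example above, the returned gradient fields can be used like any other yt field; for instance (the sphere radius here is arbitrary, and ``extrema`` is a standard yt derived quantity):

    sp = ds.sphere("c", (100.0, "kpc"))
    # One of the fields created by add_gradient_fields above:
    print(sp.quantities.extrema(("gas", "temperature_gradient_magnitude")))
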
General Particle Fields
-----------------------
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -82,6 +82,17 @@
.. yt_cookbook:: derived_field.py
+.. _cookbook-complicated-derived-fields:
+
+Complicated Derived Fields
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to use the
+:meth:`~yt.frontends.flash.data_structures.FLASHDataset.add_gradient_fields` method
+to generate gradient fields and use them in a more complex derived field.
+
+.. yt_cookbook:: hse_field.py
+
Using Particle Filters to Calculate Star Formation Rates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a doc/source/cookbook/hse_field.py
--- a/doc/source/cookbook/hse_field.py
+++ b/doc/source/cookbook/hse_field.py
@@ -1,44 +1,32 @@
import numpy as np
import yt
-from yt.fields.field_plugin_registry import \
- register_field_plugin
-from yt.fields.fluid_fields import \
- setup_gradient_fields
-
-
-# Define the components of the gravitational acceleration vector field by
-# taking the gradient of the gravitational potential
-@register_field_plugin
-def setup_my_fields(registry, ftype="gas", slice_info=None):
- setup_gradient_fields(registry, (ftype, "gravitational_potential"),
- "cm ** 2 / s ** 2", slice_info)
-
-# Define the "degree of hydrostatic equilibrium" field
-
-
-@yt.derived_field(name='HSE', units=None, take_log=False,
- display_name='Hydrostatic Equilibrium')
-def HSE(field, data):
-
- gx = data["density"] * data["gravitational_potential_gradient_x"]
- gy = data["density"] * data["gravitational_potential_gradient_y"]
- gz = data["density"] * data["gravitational_potential_gradient_z"]
-
- hx = data["pressure_gradient_x"] - gx
- hy = data["pressure_gradient_y"] - gy
- hz = data["pressure_gradient_z"] - gz
-
- h = np.sqrt((hx * hx + hy * hy + hz * hz) / (gx * gx + gy * gy + gz * gz))
-
- return h
-
-
# Open a dataset from when there's a lot of sloshing going on.
ds = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0350")
-# gradient operator requires periodic boundaries. This dataset has
+# Define the components of the gravitational acceleration vector field by
+# taking the gradient of the gravitational potential
+grad_fields = ds.add_gradient_fields(("gas","gravitational_potential"))
+
+# We don't need to do the same for the pressure field because yt already
+# has pressure gradient fields. Now, define the "degree of hydrostatic
+# equilibrium" field.
+
+def _hse(field, data):
+ # Remember that g is the negative of the potential gradient
+ gx = -data["density"] * data["gravitational_potential_gradient_x"]
+ gy = -data["density"] * data["gravitational_potential_gradient_y"]
+ gz = -data["density"] * data["gravitational_potential_gradient_z"]
+ hx = data["pressure_gradient_x"] - gx
+ hy = data["pressure_gradient_y"] - gy
+ hz = data["pressure_gradient_z"] - gz
+ h = np.sqrt((hx * hx + hy * hy + hz * hz) / (gx * gx + gy * gy + gz * gz))
+ return h
+ds.add_field(('gas','HSE'), function=_hse, units="", take_log=False,
+ display_name='Hydrostatic Equilibrium')
+
+# The gradient operator requires periodic boundaries. This dataset has
# open boundary conditions. We need to hack it for now (this will be fixed
# in future version of yt)
ds.periodicity = (True, True, True)
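
In equation form, the 'HSE' field defined in this recipe measures the fractional departure from hydrostatic balance (a restatement of the code above, not new physics):

    h = \frac{|\nabla P - \rho \mathbf{g}|}{|\rho \mathbf{g}|},
        \qquad \mathbf{g} = -\nabla \Phi

so h goes to 0 where pressure gradients balance gravity, and h is of order 1 where they are far out of balance.
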
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -104,7 +104,11 @@
-----------
Athena 4.x VTK data is *mostly* supported and cared for by John
-ZuHone. Both uniform grid and SMR datasets are supported.
+ZuHone. Both uniform grid and SMR datasets are supported.
+
+.. note::
+ yt also recognizes Fargo3D data written to VTK files as
+ Athena data, but support for Fargo3D data is preliminary.
Loading Athena datasets is slightly different depending on whether
your dataset came from a serial or a parallel run. If the data came
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -39,6 +39,28 @@
 have the necessary compilers installed (e.g. the ``build-essentials``
package on debian and ubuntu).
+.. _branches-of-yt:
+
+Branches of yt: ``yt``, ``stable``, and ``yt-2.x``
+++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Before you install yt, you must decide which branch (i.e. version) of the code
+you prefer to use:
+
+* ``yt`` -- The most up-to-date *development* version with the most current features but sometimes unstable (yt-3.x)
+* ``stable`` -- The latest stable release of yt-3.x
+* ``yt-2.x`` -- The latest stable release of yt-2.x
+
+If this is your first time using the code, we recommend using ``stable``,
+unless you specifically need some piece of brand-new functionality only
+available in ``yt`` or need to run an old script developed for ``yt-2.x``.
+There were major API and functionality changes made in yt after version 2.7
+in moving to version 3.0. For a detailed description of the changes
+between versions 2.x (e.g. branch ``yt-2.x``) and 3.x (e.g. branches ``yt`` and
+``stable``) see :ref:`yt3differences`. Lastly, don't feel like you're locked
+into one branch when you install yt, because you can easily change the active
+branch by following the instructions in :ref:`switching-between-yt-versions`.
+
.. _install-script:
All-in-One Installation Script
@@ -60,16 +82,22 @@
its dependencies will be removed from your system (no scattered files remaining
throughout your system).
+.. _installing-yt:
+
Running the Install Script
^^^^^^^^^^^^^^^^^^^^^^^^^^
-To get the installation script, download it from:
+To get the installation script for the ``stable`` branch of the code,
+download it from:
.. code-block:: bash
wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
-.. _installing-yt:
+If you wish to install a different version of yt (see
+:ref:`above <branches-of-yt>`), replace ``stable`` with the appropriate
+branch name (e.g. ``yt``, ``yt-2.x``) in the path above to get the correct
+install script.
By default, the bash install script will install an array of items, but there
are additional packages that can be downloaded and installed (e.g. SciPy, enzo,
@@ -329,8 +357,8 @@
.. _switching-between-yt-versions:
-Switching between yt-2.x and yt-3.x
------------------------------------
+Switching versions of yt: yt-2.x, yt-3.x, stable, and dev
+---------------------------------------------------------
With the release of version 3.0 of yt, development of the legacy yt 2.x series
has been relegated to bugfixes. That said, we will continue supporting the 2.x
@@ -356,12 +384,8 @@
hg update <desired-version>
python setup.py develop
-Valid versions to jump to are:
+Valid versions to jump to are described in :ref:`branches-of-yt`.
-* ``yt`` -- The latest *dev* changes in yt-3.x (can be unstable)
-* ``stable`` -- The latest stable release of yt-3.x
-* ``yt-2.x`` -- The latest stable release of yt-2.x
-
You can check which version of yt you have installed by invoking ``yt version``
at the command line. If you encounter problems, see :ref:`update-errors`.
@@ -387,11 +411,7 @@
hg update <desired-version>
python setup.py install --user --prefix=
-Valid versions to jump to are:
-
-* ``yt`` -- The latest *dev* changes in yt-3.x (can be unstable)
-* ``stable`` -- The latest stable release of yt-3.x
-* ``yt-2.x`` -- The latest stable release of yt-2.x
+Valid versions to jump to are described in :ref:`branches-of-yt`.
You can check which version of yt you have installed by invoking ``yt version``
at the command line. If you encounter problems, see :ref:`update-errors`.
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -227,8 +227,6 @@
~yt.frontends.chombo.data_structures.Orion2Hierarchy
~yt.frontends.chombo.data_structures.Orion2Dataset
~yt.frontends.chombo.io.IOHandlerChomboHDF5
- ~yt.frontends.chombo.io.IOHandlerChombo2DHDF5
- ~yt.frontends.chombo.io.IOHandlerChombo1DHDF5
~yt.frontends.chombo.io.IOHandlerOrion2HDF5
Enzo
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a doc/source/visualizing/FITSImageBuffer.ipynb
--- a/doc/source/visualizing/FITSImageBuffer.ipynb
+++ /dev/null
@@ -1,205 +0,0 @@
-{
- "metadata": {
- "name": "",
- "signature": "sha256:872f7525edd3c1ee09c67f6ecdd8552218df05ebe5ab73bcab55654edf0ac2bb"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
- {
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "yt has capabilities for writing 2D and 3D uniformly gridded data generated from datasets to FITS files. This is via the `FITSImageBuffer` class, which has subclasses `FITSSlice` and `FITSProjection` to write slices and projections directly to FITS. We'll test this out on an Athena dataset."
- ]
- },
- {
- "cell_type": "code",
- "collapsed": false,
- "input": [
- "%matplotlib inline\n",
- "import yt\n",
- "from yt.utilities.fits_image import FITSImageBuffer, FITSSlice, FITSProjection"
- ],
- "language": "python",
- "metadata": {},
- "outputs": []
- },
- {
- "cell_type": "code",
- "collapsed": false,
- "input": [
- "ds = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", parameters={\"length_unit\":(1.0,\"Mpc\"),\n",
- " \"mass_unit\":(1.0e14,\"Msun\"),\n",
- " \"time_unit\":(1.0,\"Myr\")})"
- ],
- "language": "python",
- "metadata": {},
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "To demonstrate a useful example of creating a FITS file, let's first make a `ProjectionPlot`:"
- ]
- },
- {
- "cell_type": "code",
- "collapsed": false,
- "input": [
- "prj = yt.ProjectionPlot(ds, \"z\", [\"temperature\"], weight_field=\"density\", width=(500.,\"kpc\"))\n",
- "prj.show()"
- ],
- "language": "python",
- "metadata": {},
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Suppose that we wanted to write this projection to a FITS file for analysis and visualization in other programs, such as ds9. We can do that using `FITSProjection`:"
- ]
- },
- {
- "cell_type": "code",
- "collapsed": false,
- "input": [
- "prj_fits = FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\")"
- ],
- "language": "python",
- "metadata": {},
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "which took the same parameters as `ProjectionPlot` except the width, because `FITSProjection` and `FITSSlice` always make slices and projections of the width of the domain size, at the finest resolution available in the simulation, in a unit determined to be appropriate for the physical size of the dataset. `prj_fits` is a full-fledged FITS file in memory, specifically an [AstroPy `HDUList`](http://astropy.readthedocs.org/en/latest/io/fits/api/hdulists.html) object. This means that we can use all of the methods inherited from `HDUList`:"
- ]
- },
- {
- "cell_type": "code",
- "collapsed": false,
- "input": [
- "prj_fits.info()"
- ],
- "language": "python",
- "metadata": {},
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "`info` shows us the contents of the virtual FITS file. We can also look at the header for the `\"temperature\"` image, like so:"
- ]
- },
- {
- "cell_type": "code",
- "collapsed": false,
- "input": [
- "prj_fits[\"temperature\"].header"
- ],
- "language": "python",
- "metadata": {},
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "where we can see that the temperature units are in Kelvin and the cell widths are in kiloparsecs. The projection can be written to disk using the `writeto` method:"
- ]
- },
- {
- "cell_type": "code",
- "collapsed": false,
- "input": [
- "prj_fits.writeto(\"sloshing.fits\", clobber=True)"
- ],
- "language": "python",
- "metadata": {},
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Since yt can read FITS image files, it can be loaded up just like any other dataset:"
- ]
- },
- {
- "cell_type": "code",
- "collapsed": false,
- "input": [
- "ds2 = yt.load(\"sloshing.fits\")"
- ],
- "language": "python",
- "metadata": {},
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "and we can make a `SlicePlot` of the 2D image, which shows the same data as the previous image:"
- ]
- },
- {
- "cell_type": "code",
- "collapsed": false,
- "input": [
- "slc2 = yt.SlicePlot(ds2, \"z\", [\"temperature\"], width=(500.,\"kpc\"))\n",
- "slc2.set_log(\"temperature\", True)\n",
- "slc2.show()"
- ],
- "language": "python",
- "metadata": {},
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "If you want more fine-grained control over what goes into the FITS file, you can call `FITSImageBuffer` directly, with various kinds of inputs. For example, you could use a `FixedResolutionBuffer`, and specify you want the units in parsecs instead:"
- ]
- },
- {
- "cell_type": "code",
- "collapsed": false,
- "input": [
- "slc3 = ds.slice(0, 0.0)\n",
- "frb = slc3.to_frb((500.,\"kpc\"), 800)\n",
- "fib = FITSImageBuffer(frb, fields=[\"density\",\"temperature\"], units=\"pc\")"
- ],
- "language": "python",
- "metadata": {},
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Finally, a 3D FITS cube can be created from a covering grid:"
- ]
- },
- {
- "cell_type": "code",
- "collapsed": false,
- "input": [
- "cvg = ds.covering_grid(ds.index.max_level, [-0.5,-0.5,-0.5], [64, 64, 64], fields=[\"density\",\"temperature\"])\n",
- "fib = FITSImageBuffer(cvg, fields=[\"density\",\"temperature\"], units=\"Mpc\")"
- ],
- "language": "python",
- "metadata": {},
- "outputs": []
- }
- ],
- "metadata": {}
- }
- ]
-}
\ No newline at end of file
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a doc/source/visualizing/FITSImageData.ipynb
--- /dev/null
+++ b/doc/source/visualizing/FITSImageData.ipynb
@@ -0,0 +1,409 @@
+{
+ "metadata": {
+ "name": "",
+ "signature": "sha256:c7de5ef190feaa2289595aec7eaa05db02fd535e408e0d04aa54088b0bd3ebae"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "yt has capabilities for writing 2D and 3D uniformly gridded data generated from datasets to FITS files. This is via the `FITSImageData` class. We'll test these capabilities out on an Athena dataset."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "import yt\n",
+ "from yt.utilities.fits_image import FITSImageData, FITSProjection"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "ds = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", parameters={\"length_unit\":(1.0,\"Mpc\"),\n",
+ " \"mass_unit\":(1.0e14,\"Msun\"),\n",
+ " \"time_unit\":(1.0,\"Myr\")})"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Creating FITS images from Slices and Projections"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "There are several ways to make a `FITSImageData` instance. The most intuitive ways are to use the `FITSSlice`, `FITSProjection`, `FITSOffAxisSlice`, and `FITSOffAxisProjection` classes to write slices and projections directly to FITS. To demonstrate a useful example of creating a FITS file, let's first make a `ProjectionPlot`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "prj = yt.ProjectionPlot(ds, \"z\", [\"temperature\"], weight_field=\"density\", width=(500.,\"kpc\"))\n",
+ "prj.show()"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Suppose that we wanted to write this projection to a FITS file for analysis and visualization in other programs, such as ds9. We can do that using `FITSProjection`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "prj_fits = FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\")"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "which took the same parameters as `ProjectionPlot` except the width, because `FITSProjection` and `FITSSlice` always make slices and projections of the width of the domain size, at the finest resolution available in the simulation, in a unit determined to be appropriate for the physical size of the dataset."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Because `FITSImageData` inherits from the [AstroPy `HDUList`](http://astropy.readthedocs.org/en/latest/io/fits/api/hdulists.html) class, we can call its methods. For example, `info` shows us the contents of the virtual FITS file:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "prj_fits.info()"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can also look at the header for a particular field:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "prj_fits[\"temperature\"].header"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "where we can see that the temperature units are in Kelvin and the cell widths are in kiloparsecs. If we want the raw image data with units, we can call `get_data`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "prj_fits.get_data(\"temperature\")"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can use the `set_unit` method to change the units of a particular field:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "prj_fits.set_unit(\"temperature\",\"R\")\n",
+ "prj_fits.get_data(\"temperature\")"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The image can be written to disk using the `writeto` method:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "prj_fits.writeto(\"sloshing.fits\", clobber=True)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Since yt can read FITS image files, it can be loaded up just like any other dataset:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "ds2 = yt.load(\"sloshing.fits\")"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "and we can make a `SlicePlot` of the 2D image, which shows the same data as the previous image:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "slc2 = yt.SlicePlot(ds2, \"z\", [\"temperature\"], width=(500.,\"kpc\"))\n",
+ "slc2.set_log(\"temperature\", True)\n",
+ "slc2.show()"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Using `FITSImageData` directly"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If you want more fine-grained control over what goes into the FITS file, you can call `FITSImageData` directly, with various kinds of inputs. For example, you could use a `FixedResolutionBuffer`, and specify you want the units in parsecs instead:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "slc3 = ds.slice(0, 0.0)\n",
+ "frb = slc3.to_frb((500.,\"kpc\"), 800)\n",
+ "fid_frb = FITSImageData(frb, fields=[\"density\",\"temperature\"], units=\"pc\")"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A 3D FITS cube can also be created from a covering grid:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "cvg = ds.covering_grid(ds.index.max_level, [-0.5,-0.5,-0.5], [64, 64, 64], fields=[\"density\",\"temperature\"])\n",
+ "fid_cvg = FITSImageData(cvg, fields=[\"density\",\"temperature\"], units=\"Mpc\")"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Other `FITSImageData` Methods"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A `FITSImageData` instance can be generated from one previously written to disk using the `from_file` classmethod:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "fid = FITSImageData.from_file(\"sloshing.fits\")\n",
+ "fid.info()"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Multiple `FITSImageData` can be combined to create a new one, provided that the coordinate information is the same:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "prj_fits2 = FITSProjection(ds, \"z\", [\"density\"])\n",
+ "prj_fits3 = FITSImageData.from_images([prj_fits, prj_fits2])\n",
+ "prj_fits3.info()"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Alternatively, individual fields can be popped as well:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "dens_fits = prj_fits3.pop(\"density\")"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "dens_fits.info()"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "prj_fits3.info()"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "So far, the FITS images we have shown have linear spatial coordinates. One may want to take a projection of an object and make a crude mock observation out of it, with celestial coordinates. For this, we can use the `create_sky_wcs` method. Specify a center (RA, Dec) coordinate in degrees, as well as a linear scale in terms of angle per distance:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "sky_center = [30.,45.] # in degrees\n",
+ "sky_scale = (2.5, \"arcsec/kpc\") # could also use a YTQuantity\n",
+ "prj_fits.create_sky_wcs(sky_center, sky_scale, ctype=[\"RA---TAN\",\"DEC--TAN\"])"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+      "By default, a tangent RA/Dec projection is used, but one could also choose another projection via the `ctype` keyword. We can now look at the header and see it has the appropriate WCS:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "prj_fits[\"temperature\"].header"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Finally, we can add header keywords to a single field or for all fields in the FITS image using `update_header`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "fid_frb.update_header(\"all\", \"time\", 0.1) # Update all the fields\n",
+ "fid_frb.update_header(\"temperature\", \"scale\", \"Rankine\") # Update just one field"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "print fid_frb[\"density\"].header[\"time\"]\n",
+ "print fid_frb[\"temperature\"].header[\"scale\"]"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ }
+ ],
+ "metadata": {}
+ }
+ ]
+}
\ No newline at end of file
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a doc/source/visualizing/callbacks.rst
--- a/doc/source/visualizing/callbacks.rst
+++ b/doc/source/visualizing/callbacks.rst
@@ -483,7 +483,7 @@
import yt
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
s = yt.SlicePlot(ds, 'z', 'density', center='max', width=(10, 'kpc'))
- s.annotate_text((2, 2), coord_system='plot', 'Galaxy!')
+ s.annotate_text((2, 2), 'Galaxy!', coord_system='plot')
s.save()
.. _annotate-title:
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a doc/source/visualizing/manual_plotting.rst
--- a/doc/source/visualizing/manual_plotting.rst
+++ b/doc/source/visualizing/manual_plotting.rst
@@ -66,6 +66,57 @@
setting up multiple axes with colorbars easier than it would be using only
matplotlib can be found in the :ref:`advanced-multi-panel` cookbook recipe.
+.. _frb-filters:
+
+Fixed Resolution Buffer Filters
+-------------------------------
+
+The FRB can be modified using a set of predefined filters in order to, e.g.,
+create realistic-looking mock observation images out of simulation data.
+Applying a filter is an irreversible operation, hence the order in which you
+apply them matters.
+
+.. python-script::
+
+ import matplotlib
+ matplotlib.use('Agg')
+ from matplotlib import pyplot as plt
+
+ import yt
+
+ ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+ slc = ds.slice('z', 0.5)
+ frb = slc.to_frb((20, 'kpc'), 512)
+ frb.apply_gauss_beam(nbeam=30, sigma=2.0)
+ frb.apply_white_noise(5e-23)
+ plt.imshow(frb['density'].d)
+ plt.savefig('frb_filters.png')
+
+Currently available filters:
+
+Gaussian Smoothing
+~~~~~~~~~~~~~~~~~~
+
+.. function:: apply_gauss_beam(self, nbeam=30, sigma=2.0)
+
+ (This is a proxy for
+ :class:`~yt.visualization.fixed_resolution_filters.FixedResolutionBufferGaussBeamFilter`.)
+
+ This filter convolves the FRB with a 2D Gaussian that is "nbeam" pixels wide
+ and has standard deviation "sigma".
+
+White Noise
+~~~~~~~~~~~
+
+.. function:: apply_white_noise(self, bg_lvl=None)
+
+ (This is a proxy for
+ :class:`~yt.visualization.fixed_resolution_filters.FixedResolutionBufferWhiteNoiseFilter`.)
+
+ This filter adds white noise with amplitude "bg_lvl" to the FRB.
+ If "bg_lvl" is not provided, the 10th percentile of the FRB's values is
+ used instead.
+
.. _manual-line-plots:
Line Plots
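As a rough sketch of what the two documented filters do to a plain image array; this is an illustration with scipy under assumed semantics, not the yt implementation, which applies the filters to the FRB's fields in place:

    import numpy as np
    from scipy import ndimage

    img = np.random.random((512, 512))   # stand-in for frb['density'].d

    # Gaussian smoothing: convolve with a 2D Gaussian of standard deviation
    # sigma (the yt filter's kernel is nbeam pixels wide).
    smoothed = ndimage.gaussian_filter(img, sigma=2.0)

    # White noise: add noise of a given amplitude; if none is given, fall back
    # to the 10th percentile of the image values, as documented above.
    bg_lvl = None
    amp = np.percentile(img, 10) if bg_lvl is None else bg_lvl
    noisy = smoothed + amp * np.random.standard_normal(img.shape)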
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a doc/source/visualizing/writing_fits_images.rst
--- a/doc/source/visualizing/writing_fits_images.rst
+++ b/doc/source/visualizing/writing_fits_images.rst
@@ -3,4 +3,4 @@
Writing FITS Images
==========================
-.. notebook:: FITSImageBuffer.ipynb
\ No newline at end of file
+.. notebook:: FITSImageData.ipynb
\ No newline at end of file
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a setup.py
--- a/setup.py
+++ b/setup.py
@@ -49,19 +49,18 @@
REASON_FILES.append((dir_name, files))
# Verify that we have Cython installed
+REQ_CYTHON = '0.22'
try:
import Cython
- if version.LooseVersion(Cython.__version__) < version.LooseVersion('0.16'):
- needs_cython = True
- else:
- needs_cython = False
+ needs_cython = \
+ version.LooseVersion(Cython.__version__) < version.LooseVersion(REQ_CYTHON)
except ImportError as e:
needs_cython = True
if needs_cython:
print("Cython is a build-time requirement for the source tree of yt.")
print("Please either install yt from a provided, release tarball,")
- print("or install Cython (version 0.16 or higher).")
+ print("or install Cython (version %s or higher)." % REQ_CYTHON)
print("You may be able to accomplish this by typing:")
print(" pip install -U Cython")
sys.exit(1)
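For reference, `LooseVersion` compares version strings component-wise rather than lexicographically, which is why it is used for the check above. A quick sanity check:

    from distutils.version import LooseVersion

    assert LooseVersion('0.21.1') < LooseVersion('0.22')
    assert not (LooseVersion('0.22.2') < LooseVersion('0.22'))
    # A plain string comparison would get this one wrong ('9' > '1'):
    assert LooseVersion('0.9') < LooseVersion('0.16')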
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -18,8 +18,16 @@
charge_proton_cgs, \
mass_electron_cgs, \
speed_of_light_cgs
+from yt.utilities.on_demand_imports import _scipy, NotAModule
-def voigt(a,u):
+special = _scipy.special
+
+def voigt_scipy(a, u):
+ x = np.asarray(u).astype(np.float64)
+ y = np.asarray(a).astype(np.float64)
+ return special.wofz(x + 1j * y).real
+
+def voigt_old(a, u):
"""
NAME:
VOIGT
@@ -209,3 +217,8 @@
tauphi = (tau0 * phi).in_units("") # profile scaled with tau0
return (lambda_bins, tauphi)
+
+if isinstance(special, NotAModule):
+ voigt = voigt_old
+else:
+ voigt = voigt_scipy
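The scipy-backed implementation above works because the Voigt profile is the real part of the Faddeeva function, H(a, u) = Re[w(u + ia)], which `scipy.special.wofz` evaluates directly. A self-contained check of the identity's limiting behavior:

    import numpy as np
    from scipy.special import wofz

    def voigt(a, u):
        # H(a, u) = Re[w(u + i*a)], with w the Faddeeva function
        return wofz(np.asarray(u, dtype=np.float64) +
                    1j * np.asarray(a, dtype=np.float64)).real

    u = np.linspace(-4.0, 4.0, 9)
    # As a -> 0 the Voigt profile reduces to the Gaussian exp(-u**2).
    print(np.allclose(voigt(1e-10, u), np.exp(-u**2)))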
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -274,8 +274,8 @@
'b_thermal': thermal_b[lixel],
'redshift': field_data['redshift'][lixel],
'v_pec': peculiar_velocity})
- pbar.update(i)
- pbar.finish()
+ pbar.update(i)
+ pbar.finish()
del column_density, delta_lambda, thermal_b, \
center_bins, width_ratio, left_index, right_index
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -10,8 +10,11 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import yt
-from yt.testing import *
+import numpy as np
+from yt.testing import \
+ assert_allclose, requires_file, requires_module
+from yt.analysis_modules.absorption_spectrum.absorption_line import \
+ voigt_old, voigt_scipy
from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
from yt.analysis_modules.cosmological_observation.api import LightRay
import tempfile
@@ -20,6 +23,7 @@
COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
+
@requires_file(COSMO_PLUS)
def test_absorption_spectrum():
"""
@@ -44,22 +48,24 @@
my_label = 'HI Lya'
field = 'H_number_density'
- wavelength = 1215.6700 # Angstromss
+ wavelength = 1215.6700 # Angstroms
f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
- sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10)
+ sp.add_line(my_label, field, wavelength, f_value,
+ gamma, mass, label_threshold=1.e10)
my_label = 'HI Lya'
field = 'H_number_density'
- wavelength = 912.323660 # Angstroms
+ wavelength = 912.323660 # Angstroms
normalization = 1.6e17
index = 3.0
sp.add_continuum(my_label, field, wavelength, normalization, index)
- wavelength, flux = sp.make_spectrum('lightray.h5', output_file='spectrum.txt',
+ wavelength, flux = sp.make_spectrum('lightray.h5',
+ output_file='spectrum.txt',
line_list_file='lines.txt',
use_peculiar_velocity=True)
@@ -93,25 +99,34 @@
my_label = 'HI Lya'
field = 'H_number_density'
- wavelength = 1215.6700 # Angstromss
+ wavelength = 1215.6700 # Angstroms
f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
- sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10)
+ sp.add_line(my_label, field, wavelength, f_value,
+ gamma, mass, label_threshold=1.e10)
my_label = 'HI Lya'
field = 'H_number_density'
- wavelength = 912.323660 # Angstroms
+ wavelength = 912.323660 # Angstroms
normalization = 1.6e17
index = 3.0
sp.add_continuum(my_label, field, wavelength, normalization, index)
- wavelength, flux = sp.make_spectrum('lightray.h5', output_file='spectrum.fits',
+ wavelength, flux = sp.make_spectrum('lightray.h5',
+ output_file='spectrum.fits',
line_list_file='lines.txt',
use_peculiar_velocity=True)
# clean up
os.chdir(curdir)
shutil.rmtree(tmpdir)
+
+
+@requires_module("scipy")
+def test_voigt_profiles():
+ a = 1.7e-4
+ x = np.linspace(5.0, -3.6, 60)
+ yield assert_allclose, voigt_old(a, x), voigt_scipy(a, x), 1e-8
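The new test relies on nose's generator-test idiom: each `yield callable, arg1, arg2, ...` becomes its own test case, and the `requires_module` decorator skips it entirely when scipy is unavailable. The pattern in isolation, with a toy identity in place of the Voigt comparison:

    import numpy as np
    from numpy.testing import assert_allclose

    def test_identities():
        # nose invokes assert_allclose(lhs, rhs, 1e-8) once per yielded tuple;
        # the third positional argument is the relative tolerance.
        for x in (np.arange(4.0), np.linspace(-1.0, 1.0, 5)):
            yield assert_allclose, np.square(x), x * x, 1e-8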
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -400,7 +400,7 @@
The field used as the overdensity from which interpolation is done to
calculate virial quantities.
Default: ("gas", "overdensity")
- critical_density : float
+ critical_overdensity : float
The value of the overdensity at which to evaluate the virial quantities.
Overdensity is with respect to the critical density.
Default: 200
diff -r 0b129f7d5fcf2d3918daa7f91cf164bb2deb5bb3 -r bf0e6d76f16abba06749e4167aedee5adf56628a yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -25,7 +25,7 @@
from yt.extern.six import string_types
import numpy as np
from yt.funcs import *
-from yt.utilities.physical_constants import mp, kboltz
+from yt.utilities.physical_constants import mp
from yt.utilities.parallel_tools.parallel_analysis_interface import \
parallel_objects
from yt.units.yt_array import uconcatenate
@@ -34,6 +34,12 @@
kT_min = 8.08e-2
kT_max = 50.
+photon_units = {"Energy":"keV",
+ "dx":"kpc"}
+for ax in "xyz":
+ photon_units[ax] = "kpc"
+ photon_units["v"+ax] = "km/s"
+
class PhotonModel(object):
def __init__(self):
@@ -46,7 +52,7 @@
class ThermalPhotonModel(PhotonModel):
r"""
Initialize a ThermalPhotonModel from a thermal spectrum.
-
+
Parameters
----------
@@ -61,30 +67,37 @@
photons_per_chunk : integer
The maximum number of photons that are allocated per chunk. Increase or decrease
as needed.
+ method : string, optional
+ The method used to generate the photon energies from the spectrum:
+ "invert_cdf": Invert the cumulative distribution function of the spectrum.
+ "accept_reject": Acceptance-rejection method using the spectrum.
+ The first method should be sufficient for most cases.
"""
- def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, photons_per_chunk=10000000):
+ def __init__(self, spectral_model, X_H=0.75, Zmet=0.3,
+ photons_per_chunk=10000000, method="invert_cdf"):
self.X_H = X_H
self.Zmet = Zmet
self.spectral_model = spectral_model
self.photons_per_chunk = photons_per_chunk
+ self.method = method
def __call__(self, data_source, parameters):
-
+
ds = data_source.ds
exp_time = parameters["FiducialExposureTime"]
area = parameters["FiducialArea"]
redshift = parameters["FiducialRedshift"]
D_A = parameters["FiducialAngularDiameterDistance"].in_cgs()
- dist_fac = 1.0/(4.*np.pi*D_A.value*D_A.value*(1.+redshift)**3)
+ dist_fac = 1.0/(4.*np.pi*D_A.value*D_A.value*(1.+redshift)**2)
src_ctr = parameters["center"]
- vol_scale = 1.0/np.prod(ds.domain_width.in_cgs().to_ndarray())
-
my_kT_min, my_kT_max = data_source.quantities.extrema("kT")
- self.spectral_model.prepare()
- energy = self.spectral_model.ebins
+ self.spectral_model.prepare_spectrum(redshift)
+ emid = self.spectral_model.emid
+ ebins = self.spectral_model.ebins
+ nchan = len(emid)
citer = data_source.chunks([], "io")
@@ -99,7 +112,13 @@
photons["Energy"] = []
photons["NumberOfPhotons"] = []
- spectral_norm = area.v*exp_time.v*dist_fac/vol_scale
+ spectral_norm = area.v*exp_time.v*dist_fac
+
+ tot_num_cells = data_source.ires.shape[0]
+
+ pbar = get_pbar("Generating photons ", tot_num_cells)
+
+ cell_counter = 0
for chunk in parallel_objects(citer):
@@ -114,7 +133,7 @@
if isinstance(self.Zmet, string_types):
metalZ = chunk[self.Zmet].v
else:
- metalZ = self.Zmet
+ metalZ = self.Zmet*np.ones(num_cells)
idxs = np.argsort(kT)
@@ -133,7 +152,7 @@
n += bcount
kT_idxs = np.unique(kT_idxs)
- cell_em = EM[idxs]*vol_scale
+ cell_em = EM[idxs]*spectral_norm
number_of_photons = np.zeros(num_cells, dtype="uint64")
energies = np.zeros(self.photons_per_chunk)
@@ -141,8 +160,6 @@
start_e = 0
end_e = 0
- pbar = get_pbar("Generating photons for chunk ", num_cells)
-
for ibegin, iend, ikT in zip(bcell, ecell, kT_idxs):
kT = kT_bins[ikT] + 0.5*dkT
@@ -151,58 +168,57 @@
cem = cell_em[ibegin:iend]
- em_sum_c = cem.sum()
- if isinstance(self.Zmet, string_types):
- em_sum_m = (metalZ[ibegin:iend]*cem).sum()
- else:
- em_sum_m = metalZ*em_sum_c
-
cspec, mspec = self.spectral_model.get_spectrum(kT)
- cumspec_c = np.cumsum(cspec.d)
- tot_ph_c = cumspec_c[-1]*spectral_norm*em_sum_c
- cumspec_c /= cumspec_c[-1]
- cumspec_c = np.insert(cumspec_c, 0, 0.0)
-
- cumspec_m = np.cumsum(mspec.d)
- tot_ph_m = cumspec_m[-1]*spectral_norm*em_sum_m
- cumspec_m /= cumspec_m[-1]
- cumspec_m = np.insert(cumspec_m, 0, 0.0)
+ tot_ph_c = cspec.d.sum()
+ tot_ph_m = mspec.d.sum()
u = np.random.random(size=n_current)
- cell_norm_c = tot_ph_c*cem/em_sum_c
- cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u)
-
- if isinstance(self.Zmet, string_types):
- cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem/em_sum_m
- else:
- cell_norm_m = tot_ph_m*metalZ*cem/em_sum_m
- cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u)
-
- number_of_photons[ibegin:iend] = cell_n_c + cell_n_m
+ cell_norm_c = tot_ph_c*cem
+ cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem
+ cell_norm = np.modf(cell_norm_c + cell_norm_m)
+ cell_n = np.uint64(cell_norm[1]) + np.uint64(cell_norm[0] >= u)
- end_e += int((cell_n_c+cell_n_m).sum())
+ number_of_photons[ibegin:iend] = cell_n
+
+ end_e += int(cell_n.sum())
if end_e > self.photons_per_chunk:
raise RuntimeError("Number of photons generated for this chunk "+
"exceeds photons_per_chunk (%d)! " % self.photons_per_chunk +
"Increase photons_per_chunk!")
- energies[start_e:end_e] = _generate_energies(cell_n_c, cell_n_m, cumspec_c, cumspec_m, energy)
-
+ if self.method == "invert_cdf":
+ cumspec_c = np.cumsum(cspec.d)
+ cumspec_m = np.cumsum(mspec.d)
+ cumspec_c = np.insert(cumspec_c, 0, 0.0)
+ cumspec_m = np.insert(cumspec_m, 0, 0.0)
+
+ ei = start_e
+ for cn, Z in zip(number_of_photons[ibegin:iend], metalZ[ibegin:iend]):
+ if cn == 0: continue
+ if self.method == "invert_cdf":
+ cumspec = cumspec_c + Z*cumspec_m
+ cumspec /= cumspec[-1]
+ randvec = np.random.uniform(size=cn)
+ randvec.sort()
+ cell_e = np.interp(randvec, cumspec, ebins)
+ elif self.method == "accept_reject":
+ tot_spec = cspec.d+Z*mspec.d
+ tot_spec /= tot_spec.sum()
+ eidxs = np.random.choice(nchan, size=cn, p=tot_spec)
+ cell_e = emid[eidxs]
+ energies[ei:ei+cn] = cell_e
+ cell_counter += 1
+ pbar.update(cell_counter)
+ ei += cn
+
start_e = end_e
- pbar.update(iend)
-
- pbar.finish()
-
active_cells = number_of_photons > 0
idxs = idxs[active_cells]
- mylog.info("Number of photons generated for this chunk: %d" % int(number_of_photons.sum()))
- mylog.info("Number of cells with photons: %d" % int(active_cells.sum()))
-
photons["NumberOfPhotons"].append(number_of_photons[active_cells])
photons["Energy"].append(ds.arr(energies[:end_e].copy(), "keV"))
photons["x"].append((chunk["x"][idxs]-src_ctr[0]).in_units("kpc"))
@@ -213,23 +229,17 @@
photons["vz"].append(chunk["velocity_z"][idxs].in_units("km/s"))
photons["dx"].append(chunk["dx"][idxs].in_units("kpc"))
+ pbar.finish()
+
for key in photons:
if len(photons[key]) > 0:
photons[key] = uconcatenate(photons[key])
+ elif key == "NumberOfPhotons":
+ photons[key] = np.array([])
+ else:
+ photons[key] = YTArray([], photon_units[key])
+
+ mylog.info("Number of photons generated: %d" % int(np.sum(photons["NumberOfPhotons"])))
+ mylog.info("Number of cells with photons: %d" % len(photons["x"]))
return photons
-
-def _generate_energies(cell_n_c, cell_n_m, counts_c, counts_m, energy):
- energies = np.array([])
- for cn_c, cn_m in zip(cell_n_c, cell_n_m):
- if cn_c > 0:
- randvec_c = np.random.uniform(size=cn_c)
- randvec_c.sort()
- cell_e_c = np.interp(randvec_c, counts_c, energy)
- energies = np.append(energies, cell_e_c)
- if cn_m > 0:
- randvec_m = np.random.uniform(size=cn_m)
- randvec_m.sort()
- cell_e_m = np.interp(randvec_m, counts_m, energy)
- energies = np.append(energies, cell_e_m)
- return energies
This diff is so big that we needed to truncate the remainder.
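The heart of this photon_models.py refactor is that each cell now draws a single photon count from the combined continuum-plus-metal spectrum and then samples energies with one of two schemes selected by the new `method` keyword. A self-contained sketch of both samplers with a synthetic spectrum (yt's version does this per temperature bin, weighting the metal component by Z):

    import numpy as np

    nchan = 1000
    ebins = np.linspace(0.1, 10.0, nchan + 1)   # channel edges in keV
    emid = 0.5 * (ebins[1:] + ebins[:-1])       # channel centers
    spec = np.exp(-emid)                        # synthetic combined spectrum
    n_photons = 10000

    # "invert_cdf": invert the cumulative distribution of the spectrum.
    cumspec = np.insert(np.cumsum(spec), 0, 0.0)
    cumspec /= cumspec[-1]
    randvec = np.sort(np.random.uniform(size=n_photons))
    energies_cdf = np.interp(randvec, cumspec, ebins)

    # "accept_reject": draw channels with the normalized spectrum as weights,
    # as the np.random.choice branch in the diff does.
    energies_ar = emid[np.random.choice(nchan, size=n_photons,
                                        p=spec / spec.sum())]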
https://bitbucket.org/yt_analysis/yt/commits/989be30f7b70/
Changeset: 989be30f7b70
Branch: yt
User: chummels
Date: 2015-07-16 17:08:41+00:00
Summary: Merged in RicardaBeckmann/yt (pull request #1610)
Updating units for ramses runs when boxlength matters
Affected #: 2 files
diff -r 7709f96fe34aabce6ade8b00bac40e866d218c08 -r 989be30f7b70fc8e144581d1882c1dac913b25f9 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -11,6 +11,7 @@
yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
yt/analysis_modules/ppv_cube/ppv_utils.c
yt/frontends/ramses/_ramses_reader.cpp
+yt/frontends/sph/smoothing_kernel.c
yt/geometry/fake_octree.c
yt/geometry/grid_container.c
yt/geometry/grid_visitors.c
@@ -40,6 +41,7 @@
yt/utilities/lib/mesh_utilities.c
yt/utilities/lib/misc_utilities.c
yt/utilities/lib/Octree.c
+yt/utilities/lib/GridTree.c
yt/utilities/lib/origami.c
yt/utilities/lib/pixelization_routines.c
yt/utilities/lib/png_writer.c
diff -r 7709f96fe34aabce6ade8b00bac40e866d218c08 -r 989be30f7b70fc8e144581d1882c1dac913b25f9 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -491,13 +491,22 @@
"""
Generates the conversion to various physical _units based on the parameter file
"""
- # Note that unit_l *already* converts to proper!
- # Also note that unit_l must be multiplied by the boxlen parameter to
- # ensure we are correctly set up for the current domain.
- length_unit = self.parameters['unit_l']
- boxlen = self.parameters['boxlen']
- density_unit = self.parameters['unit_d']
- mass_unit = density_unit * (length_unit * boxlen)**3
+ # Please note that for all units given in the info file, the boxlen
+ # still needs to be folded in, as shown below!
+
+ boxlen = self.parameters['boxlen']
+ length_unit = self.parameters['unit_l'] * boxlen
+ density_unit = self.parameters['unit_d'] / boxlen**3
+
+ # In the mass unit, the factors of boxlen cancel back out, so this
+ # is equivalent to unit_d*unit_l**3
+
+ mass_unit = density_unit * length_unit**3
+
+ # Cosmological runs are done in lookback conformal time.
+ # To convert to proper time, the time unit is calculated from
+ # the expansion factor. This is not yet done here!
+
time_unit = self.parameters['unit_t']
magnetic_unit = np.sqrt(4*np.pi * mass_unit /
(time_unit**2 * length_unit))
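The cancellation noted in the comments above is quick to verify: folding boxlen into the length and density units leaves the derived mass unit equal to unit_d * unit_l**3. A quick check with illustrative numbers, not values from an actual info file:

    import numpy as np

    unit_l = 3.09e21   # illustrative cm per code length
    unit_d = 6.77e-23  # illustrative g/cm**3 per code density
    boxlen = 2.0

    length_unit = unit_l * boxlen
    density_unit = unit_d / boxlen**3
    mass_unit = density_unit * length_unit**3   # the boxlen**3 factors cancel

    assert np.isclose(mass_unit, unit_d * unit_l**3)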
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.