[yt-svn] commit/yt: 2 new changesets
Bitbucket
commits-noreply at bitbucket.org
Wed Mar 28 12:01:12 PDT 2012
2 new commits in yt:
https://bitbucket.org/yt_analysis/yt/changeset/40b187755e81/
changeset: 40b187755e81
branch: yt
user: MatthewTurk
date: 2012-03-28 20:56:38
summary: Fixes for the RecurseOctreeByLevels function in the case where you're bordering
on the precision limit at your highest levels.
affected #: 2 files
diff -r 495e3a9c0b067e11ce218fdb447065092a362175 -r 40b187755e810e82e6dda74682fe3ae309bdd870 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -294,17 +294,17 @@
o_length = na.sum(levels_finest.values())
r_length = na.sum(levels_all.values())
output = na.zeros((r_length,len(fields)), dtype='float64')
- genealogy = na.zeros((r_length, 3), dtype='int32') - 1 # init to -1
+ genealogy = na.zeros((r_length, 3), dtype='int64') - 1 # init to -1
corners = na.zeros((r_length, 3), dtype='float64')
position = na.add.accumulate(
na.array([0] + [levels_all[v] for v in
- sorted(levels_all)[:-1]], dtype='int32'))
+ sorted(levels_all)[:-1]], dtype='int64'), dtype="int64")
pp = position.copy()
amr_utils.RecurseOctreeByLevels(0, 0, 0,
ogl[0].dimensions[0],
ogl[0].dimensions[1],
ogl[0].dimensions[2],
- position.astype('int32'), 1,
+ position.astype('int64'), 1,
output, genealogy, corners, ogl)
return output, genealogy, levels_all, levels_finest, pp, corners
diff -r 495e3a9c0b067e11ce218fdb447065092a362175 -r 40b187755e810e82e6dda74682fe3ae309bdd870 yt/utilities/_amr_utils/DepthFirstOctree.pyx
--- a/yt/utilities/_amr_utils/DepthFirstOctree.pyx
+++ b/yt/utilities/_amr_utils/DepthFirstOctree.pyx
@@ -27,6 +27,21 @@
cimport numpy as np
cimport cython
+cdef extern from "math.h":
+ double exp(double x)
+ float expf(float x)
+ long double expl(long double x)
+ double floor(double x)
+ double ceil(double x)
+ double fmod(double x, double y)
+ double log2(double x)
+ long int lrint(double x)
+ double fabs(double x)
+ double cos(double x)
+ double sin(double x)
+ double asin(double x)
+ double acos(double x)
+
cdef class position:
cdef public int output_pos, refined_pos
def __cinit__(self):
@@ -107,16 +122,15 @@
curpos, ci - grid.offset, output, refined, grids)
return s
-@cython.boundscheck(False)
def RecurseOctreeByLevels(int i_i, int j_i, int k_i,
int i_f, int j_f, int k_f,
- np.ndarray[np.int32_t, ndim=1] curpos,
+ np.ndarray[np.int64_t, ndim=1] curpos,
int gi,
np.ndarray[np.float64_t, ndim=2] output,
- np.ndarray[np.int32_t, ndim=2] genealogy,
+ np.ndarray[np.int64_t, ndim=2] genealogy,
np.ndarray[np.float64_t, ndim=2] corners,
OctreeGridList grids):
- cdef np.int32_t i, i_off, j, j_off, k, k_off, ci, fi
+ cdef np.int64_t i, i_off, j, j_off, k, k_off, ci, fi
cdef int child_i, child_j, child_k
cdef OctreeGrid child_grid
cdef OctreeGrid grid = grids[gi-1]
@@ -129,7 +143,7 @@
cdef np.float64_t child_dx
cdef np.ndarray[np.float64_t, ndim=1] child_leftedges
cdef np.float64_t cx, cy, cz
- cdef int cp
+ cdef np.int64_t cp
cdef int s = 0
for i_off in range(i_f):
i = i_off + i_i
@@ -153,9 +167,9 @@
child_grid = grids[ci-1]
child_dx = child_grid.dx[0]
child_leftedges = child_grid.left_edges
- child_i = int((cx-child_leftedges[0])/child_dx)
- child_j = int((cy-child_leftedges[1])/child_dx)
- child_k = int((cz-child_leftedges[2])/child_dx)
+ child_i = lrint((cx-child_leftedges[0])/child_dx)
+ child_j = lrint((cy-child_leftedges[1])/child_dx)
+ child_k = lrint((cz-child_leftedges[2])/child_dx)
# set current child id to id of next cell to examine
genealogy[cp, 0] = curpos[level+1]
# set next parent id to id of current cell
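For illustration, a minimal stand-alone sketch (not part of the commit) of why truncating the child index with int() breaks down near the floating-point precision limit, and why rounding to the nearest integer, as C's lrint() does, is more robust; the values below are made up. The int32 -> int64 widening in the same commit presumably protects the accumulated cell indices from overflowing on very large octrees.

    import numpy as np

    # The child index is recovered as (cx - left_edge) / dx.  In exact
    # arithmetic this is an integer, but in floating point it can land
    # just below one.
    cx, left_edge, dx = 0.3, 0.0, 0.1      # illustrative values only
    ratio = (cx - left_edge) / dx
    print(ratio)                           # 2.9999999999999996 in IEEE doubles

    # Old behaviour: int() truncates toward zero and picks the wrong cell.
    print(int(ratio))                      # 2

    # New behaviour: round to the nearest integer, like lrint().
    print(int(np.rint(ratio)))             # 3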
https://bitbucket.org/yt_analysis/yt/changeset/6b1e902e3e25/
changeset: 6b1e902e3e25
branch: yt
user: MatthewTurk
date: 2012-03-28 20:57:38
summary: Merging in the FLASH changes.
affected #: 3 files
diff -r 40b187755e810e82e6dda74682fe3ae309bdd870 -r 6b1e902e3e254030b8c41061c222c2b1a152cb9a yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -105,6 +105,9 @@
NumNeighbors = 15
NumDB = 5
+def minus_one():
+ return -1
+
class DatabaseFunctions(object):
# Common database functions so it doesn't have to be repeated.
def _open_database(self):
@@ -394,10 +397,18 @@
self.candidates = candidates
# This stores the masses contributed to each child candidate.
- self.child_mass_arr = na.zeros(len(candidates)*NumNeighbors, dtype='float64')
+ # The +1 is an extra element in the array that collects garbage
+ # values. This allows us to eliminate a try/except later.
+ # This extra array element will be cut off eventually.
+ self.child_mass_arr = na.zeros(len(candidates)*NumNeighbors + 1,
+ dtype='float64')
# Records where to put the entries in the above array.
self.child_mass_loc = defaultdict(dict)
+ # Fill it out with sub-nested default dicts that point to the
+ # garbage slot, and then fill it with correct values for (possibly)
+ # related parent/child halo pairs.
for i,halo in enumerate(sorted(candidates)):
+ self.child_mass_loc[halo] = defaultdict(minus_one)
for j, child in enumerate(candidates[halo]):
self.child_mass_loc[halo][child] = i*NumNeighbors + j
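A small self-contained sketch of the garbage-slot pattern introduced above (the names and numbers are illustrative, not the module's API): pairs that were never identified as candidates map to index -1, which aliases the extra trailing element, so the accumulation loop needs no try/except.

    import numpy as np
    from collections import defaultdict

    NUM_NEIGHBORS = 3                       # stand-in for NumNeighbors

    def minus_one():
        # Default factory: unknown children map to the garbage slot.
        return -1

    candidates = {10: [20, 21], 11: [22]}   # parent halo -> candidate children
    # One extra trailing element collects contributions from unexpected pairs.
    child_mass = np.zeros(len(candidates) * NUM_NEIGHBORS + 1, dtype="float64")

    child_mass_loc = defaultdict(dict)
    for i, halo in enumerate(sorted(candidates)):
        child_mass_loc[halo] = defaultdict(minus_one)
        for j, child in enumerate(candidates[halo]):
            child_mass_loc[halo][child] = i * NUM_NEIGHBORS + j

    # A pair that was never flagged as a candidate lands in slot -1 ...
    child_mass[child_mass_loc[10][99]] += 5.0
    # ... and the garbage slot is simply trimmed off at the end.
    child_mass = child_mass[:-1]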
@@ -498,7 +509,7 @@
# Match particles in halos.
self._match(parent_IDs, child_IDs, parent_halos, child_halos,
- parent_masses, parent_send, child_send)
+ parent_masses, parent_send, child_send)
# Now we send all the un-matched particles to the root task for one more
# pass. This depends on the assumption that most of the particles do
@@ -540,6 +551,9 @@
# Now we sum up the contributions globally.
self.child_mass_arr = self.comm.mpi_allreduce(self.child_mass_arr)
+ # Trim off the garbage slot.
+ self.child_mass_arr = self.child_mass_arr[:-1]
+
if self.comm.rank == 0:
# Turn these Msol masses into percentages of the parent.
line = "SELECT HaloMass FROM Halos WHERE SnapCurrentTimeIdentifier=%d \
@@ -603,45 +617,28 @@
def _match(self, parent_IDs, child_IDs, parent_halos, child_halos,
parent_masses, parent_send = None, child_send = None):
- # Parent IDs on the left, child IDs on the right. We skip down both
- # columns matching IDs. If they are out of synch, the index(es) is/are
- # advanced until they match up again.
- matched = 0
- left = 0
- right = 0
- while left < parent_IDs.size and right < child_IDs.size:
- if parent_IDs[left] == child_IDs[right]:
- # They match up, add this relationship.
- try:
- loc = self.child_mass_loc[parent_halos[left]][child_halos[right]]
- except KeyError:
- # This happens when a child halo contains a particle from
- # a parent halo, but the child is not identified as a
- # candidate child halo. So we do nothing and move on with
- # our lives.
- left += 1
- right += 1
- continue
- self.child_mass_arr[loc] += parent_masses[left]
- # If needed, mark this pair so we don't send them later.
- if parent_send is not None:
- parent_send[left] = False
- child_send[right] = False
- matched += 1
- left += 1
- right += 1
- continue
- if parent_IDs[left] < child_IDs[right]:
- # The left is too small, so we need to increase it.
- left += 1
- continue
- if parent_IDs[left] > child_IDs[right]:
- # Right too small.
- right += 1
- continue
+ # Pick out IDs that are in both arrays.
+ parent_in_child = na.in1d(parent_IDs, child_IDs, assume_unique = True)
+ child_in_parent = na.in1d(child_IDs, parent_IDs, assume_unique = True)
+ # Pare down the arrays to just matched particle IDs.
+ parent_halos_cut = parent_halos[parent_in_child]
+ child_halos_cut = child_halos[child_in_parent]
+ parent_masses_cut = parent_masses[parent_in_child]
+ # Mark the IDs that have matches so they're not sent later.
+ if parent_send is not None:
+ parent_send[parent_in_child] = False
+ child_send[child_in_parent] = False
+ # For matching pairs of particles, add the contribution of the mass.
+ # Occasionally, there are matches of particle IDs where the parent
+ # and child halos have not been identified as likely relations,
+ # and in that case loc will be returned as -1, which is the 'garbage'
+ # position in child_mass_arr. This will be trimmed off later.
+ for i,pair in enumerate(zip(parent_halos_cut, child_halos_cut)):
+ loc = self.child_mass_loc[pair[0]][pair[1]]
+ self.child_mass_arr[loc] += parent_masses_cut[i]
if parent_send is None:
mylog.info("Clean-up round matched %d of %d parents and %d children." % \
- (matched, parent_IDs.size, child_IDs.size))
+ (parent_in_child.sum(), parent_IDs.size, child_IDs.size))
def _copy_and_update_db(self):
"""
diff -r 40b187755e810e82e6dda74682fe3ae309bdd870 -r 6b1e902e3e254030b8c41061c222c2b1a152cb9a yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -230,6 +230,9 @@
self.parameters["EOSType"] = -1
if self.cosmological_simulation == 1:
self._setup_comoving_units()
+ if "pc_unitsbase" in self.parameters:
+ if self.parameters["pc_unitsbase"] == "CGS":
+ self._setup_cgs_units()
else:
self._setup_nounits_units()
self.time_units['1'] = 1
@@ -265,6 +268,22 @@
for unit in mpc_conversion.keys():
self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+ def _setup_cgs_units(self):
+ self.conversion_factors['dens'] = 1.0
+ self.conversion_factors['pres'] = 1.0
+ self.conversion_factors['eint'] = 1.0
+ self.conversion_factors['ener'] = 1.0
+ self.conversion_factors['temp'] = 1.0
+ self.conversion_factors['velx'] = 1.0
+ self.conversion_factors['vely'] = 1.0
+ self.conversion_factors['velz'] = 1.0
+ self.conversion_factors['particle_velx'] = 1.0
+ self.conversion_factors['particle_vely'] = 1.0
+ self.conversion_factors['particle_velz'] = 1.0
+ self.conversion_factors["Time"] = 1.0
+ for unit in mpc_conversion.keys():
+ self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
def _setup_nounits_units(self):
self.conversion_factors['dens'] = 1.0
self.conversion_factors['pres'] = 1.0
@@ -277,7 +296,6 @@
self.conversion_factors['particle_velx'] = 1.0
self.conversion_factors['particle_vely'] = 1.0
self.conversion_factors['particle_velz'] = 1.0
- z = 0
mylog.warning("Setting 1.0 in code units to be 1.0 cm")
if not self.has_key("TimeUnits"):
mylog.warning("No time units. Setting 1.0 = 1 second.")
@@ -309,24 +327,37 @@
self._handle["sim info"][:]["file format version"])
else:
raise RuntimeError("Can't figure out FLASH file version.")
+ # First we load all of the parameters
+ hns = ["simulation parameters"]
+ # note the ordering here is important: runtime parameters should
+ # overwrite scalars with the same name.
+ for ptype in ['scalars', 'runtime parameters']:
+ for vtype in ['integer', 'real', 'logical','string']:
+ hns.append("%s %s" % (vtype, ptype))
+ for hn in hns:
+ if hn not in self._handle:
+ continue
+ for varname, val in self._handle[hn]:
+ vn = varname.strip()
+ if vn in self.parameters and self.parameters[vn] != val:
+ mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn))
+ self.parameters[vn] = val
self.domain_left_edge = na.array(
- [self._find_parameter("real", "%smin" % ax) for ax in 'xyz']).astype("float64")
+ [self.parameters["%smin" % ax] for ax in 'xyz']).astype("float64")
self.domain_right_edge = na.array(
- [self._find_parameter("real", "%smax" % ax) for ax in 'xyz']).astype("float64")
- self.min_level = self._find_parameter(
- "integer", "lrefine_min", scalar = False) - 1
+ [self.parameters["%smax" % ax] for ax in 'xyz']).astype("float64")
+ self.min_level = self.parameters["lrefine_min"] -1
# Determine domain dimensions
try:
- nxb = self._find_parameter("integer", "nxb", scalar = True)
- nyb = self._find_parameter("integer", "nyb", scalar = True)
- nzb = self._find_parameter("integer", "nzb", scalar = True)
+ nxb = self.parameters["nxb"]
+ nyb = self.parameters["nyb"]
+ nzb = self.parameters["nzb"]
except KeyError:
nxb, nyb, nzb = [int(self._handle["/simulation parameters"]['n%sb' % ax])
for ax in 'xyz'] # FLASH2 only!
try:
- dimensionality = self._find_parameter("integer", "dimensionality",
- scalar = True)
+ dimensionality = self.parameters["dimensionality"]
except KeyError:
dimensionality = 3
if nzb == 1: dimensionality = 2
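A toy sketch of why the ordering of the parameter groups matters (the HDF5 handle is faked with a dict; the names are illustrative): every group is read into the same dictionary, so whichever group is visited last wins, and listing 'runtime parameters' after 'scalars' lets runtime values overwrite scalars of the same name.

    hns = ["simulation parameters"]
    for ptype in ["scalars", "runtime parameters"]:
        for vtype in ["integer", "real", "logical", "string"]:
            hns.append("%s %s" % (vtype, ptype))

    fake_handle = {                         # stand-in for the open HDF5 file
        "integer scalars": [("nxb  ", 8)],
        "integer runtime parameters": [("nxb  ", 16)],
    }
    parameters = {}
    for hn in hns:
        if hn not in fake_handle:
            continue
        for varname, val in fake_handle[hn]:
            parameters[varname.strip()] = val
    print(parameters["nxb"])                # 16: the runtime parameter wins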
@@ -334,45 +365,28 @@
if dimensionality < 3:
mylog.warning("Guessing dimensionality as %s", dimensionality)
- nblockx = self._find_parameter("integer", "nblockx")
- nblocky = self._find_parameter("integer", "nblocky")
- nblockz = self._find_parameter("integer", "nblockz")
+ nblockx = self.parameters["nblockx"]
+ nblocky = self.parameters["nblocky"]
+ nblockz = self.parameters["nblockz"]
self.dimensionality = dimensionality
self.domain_dimensions = \
na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
-
try:
- self.parameters['Gamma'] = self._find_parameter("real", "gamma")
- except KeyError:
+ self.parameters["Gamma"] = self.parameters["gamma"]
+ except:
+ mylog.warning("Cannot find Gamma")
pass
- if self._flash_version == 7:
- self.current_time = float(
- self._handle["simulation parameters"][:]["time"])
- else:
- self.current_time = \
- float(self._find_parameter("real", "time", scalar=True))
+ self.current_time = self.parameters["time"]
- if self._flash_version == 7:
- self.parameters['timestep'] = float(
- self._handle["simulation parameters"]["timestep"])
- else:
- self.parameters['timestep'] = \
- float(self._find_parameter("real", "dt", scalar=True))
-
- try:
- use_cosmo = self._find_parameter("logical", "usecosmology")
+ try:
+ self.parameters["usecosmology"]
+ self.cosmological_simulation = 1
+ self.current_redshift = self.parameters['redshift']
+ self.omega_lambda = self.parameters['cosmologicalconstant']
+ self.omega_matter = self.parameters['omegamatter']
+ self.hubble_constant = self.parameters['hubbleconstant']
except:
- use_cosmo = 0
-
- if use_cosmo == 1:
- self.cosmological_simulation = 1
- self.current_redshift = self._find_parameter("real", "redshift",
- scalar = True)
- self.omega_lambda = self._find_parameter("real", "cosmologicalconstant")
- self.omega_matter = self._find_parameter("real", "omegamatter")
- self.hubble_constant = self._find_parameter("real", "hubbleconstant")
- else:
self.current_redshift = self.omega_lambda = self.omega_matter = \
self.hubble_constant = self.cosmological_simulation = 0.0
diff -r 40b187755e810e82e6dda74682fe3ae309bdd870 -r 6b1e902e3e254030b8c41061c222c2b1a152cb9a yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -65,6 +65,7 @@
"TotalEnergy": "ener",
"GasEnergy": "eint",
"Temperature": "temp",
+ "Pressure" : "pres",
"particle_position_x" : "particle_posx",
"particle_position_y" : "particle_posy",
"particle_position_z" : "particle_posz",
@@ -212,3 +213,16 @@
function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
particle_type=True, convert_function=_convertParticleMassMsun,
particle_convert_function=_ParticleMassMsun)
+
+def _ThermalEnergy(fields,data):
+ te = data["TotalEnergy"] - 0.5 * data["Density"] * (
+ data["x-velocity"]**2.0 +
+ data["y-velocity"]**2.0 +
+ data["z-velocity"]**2.0 )
+ try:
+ te -= data['magp']
+ except:
+ pass
+ return te
+add_field("ThermalEnergy", function=_ThermalEnergy,
+ units=r"\rm{ergs}/\rm{cm^3}")
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.