[yt-svn] commit/yt: 4 new changesets
commits-noreply at bitbucket.org
Fri May 2 12:26:31 PDT 2014
4 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/44b9afc5fac8/
Changeset: 44b9afc5fac8
Branch: yt-3.0
User: ngoldbaum
Date: 2014-04-11 05:55:25
Summary: Specify a stdout StreamHandler as the logging handler.
Affected #: 1 file
diff -r 794b2beb2c48272fed16de10daa07975309b950f -r 44b9afc5fac8b0efa538e2c9662a7fdeda5f9451 yt/utilities/logger.py
--- a/yt/utilities/logger.py
+++ b/yt/utilities/logger.py
@@ -15,6 +15,7 @@
#-----------------------------------------------------------------------------
import logging
+import sys
from yt.config import ytcfg
# This next bit is grabbed from:
@@ -47,13 +48,13 @@
cfstring = "%(name)-3s: [%(levelname)-18s] %(asctime)s %(message)s"
logging.basicConfig(
format=ufstring,
- level=level
+ level=level,
+ stream=sys.stdout
)
rootLogger = logging.getLogger()
ytLogger = logging.getLogger("yt")
-
def disable_stream_logging():
# We just remove the root logger's handlers
for handler in rootLogger.handlers:
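For reference, a minimal standalone sketch of the equivalent configuration (the format string is copied from the hunk above; the level choice is an assumption):

import logging
import sys

# Route log records to stdout instead of logging's default stderr.
logging.basicConfig(
    format="%(name)-3s: [%(levelname)-18s] %(asctime)s %(message)s",
    level=logging.INFO,
    stream=sys.stdout)
logging.getLogger("yt").info("this record now goes to stdout")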
https://bitbucket.org/yt_analysis/yt/commits/51cfb2317ed3/
Changeset: 51cfb2317ed3
Branch: yt-3.0
User: ngoldbaum
Date: 2014-04-14 17:37:46
Summary: Merging with mainline tip.
Affected #: 10 files
diff -r 44b9afc5fac8b0efa538e2c9662a7fdeda5f9451 -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -61,6 +61,7 @@
self.field_list = field_list
self.slice_info = slice_info
self.field_aliases = {}
+ self.species_names = []
self.setup_fluid_aliases()
def setup_fluid_fields(self):
@@ -161,6 +162,8 @@
:class:`~yt.data_objects.api.DerivedField`.
"""
+ override = kwargs.pop("force_override", False)
+ if not override and name in self: return
if function is None:
def create_function(function):
self[name] = DerivedField(name, function, **kwargs)
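A usage sketch for the new kwarg (the field name and the registry instance are hypothetical): re-registering an existing field is now a silent no-op unless force_override=True is passed.

def _density_squared(field, data):
    return data["gas", "density"]**2

# First call registers the field; an identical repeat is ignored ...
registry.add_field(("gas", "density_squared"),
                   function=_density_squared, units="g**2/cm**6")
# ... unless we explicitly ask to replace the existing definition.
registry.add_field(("gas", "density_squared"),
                   function=_density_squared, units="g**2/cm**6",
                   force_override=True)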
diff -r 44b9afc5fac8b0efa538e2c9662a7fdeda5f9451 -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -162,12 +162,22 @@
registry.add_field((ftype, "metal_mass"),
function=_metal_mass,
units="g")
+
+ def _number_density(field, data):
+ field_data = np.zeros_like(data["gas", "%s_number_density" % \
+ data.pf.field_info.species_names[0]])
+ for species in data.pf.field_info.species_names:
+ field_data += data["gas", "%s_number_density" % species]
+ return field_data
+ registry.add_field((ftype, "number_density"),
+ function = _number_density,
+ units="cm**-3")
def _mean_molecular_weight(field, data):
return (data[ftype, "density"] / (mh * data[ftype, "number_density"]))
registry.add_field((ftype, "mean_molecular_weight"),
function=_mean_molecular_weight,
- units=r"")
+ units="")
setup_gradient_fields(registry, (ftype, "pressure"), "dyne/cm**2",
slice_info)
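For orientation, these two fields encode n = sum(n_i) over the registered species and mu = rho / (m_H * n); a back-of-the-envelope check with made-up numbers:

m_H = 1.6737e-24        # g, hydrogen mass (approximate)
rho = 1.0e-24           # g/cm**3, hypothetical gas density
n = 0.55                # cm**-3, hypothetical total number density
print rho / (m_H * n)   # ~1.09, dimensionless mean molecular weight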
diff -r 44b9afc5fac8b0efa538e2c9662a7fdeda5f9451 -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 yt/fields/interpolated_fields.py
--- /dev/null
+++ b/yt/fields/interpolated_fields.py
@@ -0,0 +1,49 @@
+"""
+Fields from interpolating data tables.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.fields.local_fields import add_field
+
+from yt.utilities.linear_interpolators import \
+ UnilinearFieldInterpolator, \
+ BilinearFieldInterpolator, \
+ TrilinearFieldInterpolator
+
+_int_class = {1: UnilinearFieldInterpolator,
+ 2: BilinearFieldInterpolator,
+ 3: TrilinearFieldInterpolator}
+
+def add_interpolated_field(name, units, table_data, axes_data, axes_fields,
+ ftype="gas", particle_type=False, validators=None,
+ truncate=True):
+
+ if len(table_data.shape) not in _int_class:
+ raise RuntimeError("Interpolated field can only be created from 1d, 2d, or 3d data.")
+
+ if len(axes_fields) != len(axes_data) or len(axes_fields) != len(table_data.shape):
+ raise RuntimeError("Data dimension mismatch: data is %d, %d axes data provided, and %d axes fields provided." %
+ (len(table_data.shape), len(axes_data), len(axes_fields)))
+
+ int_class = _int_class[len(table_data.shape)]
+ my_interpolator = int_class(table_data, axes_data, axes_fields, truncate=truncate)
+
+ def _interpolated_field(field, data):
+ return my_interpolator(data)
+ add_field((ftype, name),
+ function=_interpolated_field,
+ units=units,
+ validators=validators, particle_type=particle_type)
+
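A hypothetical usage sketch for the new helper, assuming a 2D table sampled on bin arrays for two existing fields and that the interpolators accept bin arrays as boundaries (every name and value below is made up):

import numpy as np
from yt.fields.interpolated_fields import add_interpolated_field

dens_bins = np.linspace(1.0e-30, 1.0e-24, 64)  # hypothetical density bins
temp_bins = np.linspace(1.0e2, 1.0e8, 64)      # hypothetical temperature bins
table = np.random.random((64, 64))             # placeholder table values

add_interpolated_field("my_table_field", "erg/s", table,
                       [dens_bins, temp_bins],
                       [("gas", "density"), ("gas", "temperature")])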
diff -r 44b9afc5fac8b0efa538e2c9662a7fdeda5f9451 -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -14,15 +14,22 @@
#-----------------------------------------------------------------------------
import numpy as np
+import re
from yt.utilities.physical_constants import \
mh, \
mass_sun_cgs, \
amu_cgs
+from yt.utilities.physical_ratios import \
+ primordial_H_mass_fraction
from yt.funcs import *
from yt.utilities.chemical_formulas import \
ChemicalFormula
+_primordial_mass_fraction = \
+ {"H": primordial_H_mass_fraction,
+ "He" : (1 - primordial_H_mass_fraction)}
+
# See YTEP-0003 for details, but we want to ensure these fields are all
# populated:
#
@@ -50,7 +57,7 @@
weight *= amu_cgs
def _number_density(field, data):
return data[ftype, "%s_density" % species] \
- / amu_cgs
+ / weight
return _number_density
def _create_density_func(ftype, species):
@@ -102,3 +109,53 @@
function = _create_number_density_func(ftype, species),
particle_type = particle_type,
units = "cm**-3")
+
+def add_nuclei_density_fields(registry, ftype,
+ particle_type = False):
+ elements = _get_all_elements(registry.species_names)
+ for element in elements:
+ registry.add_field((ftype, "%s_nuclei_density" % element),
+ function = _nuclei_density,
+ particle_type = particle_type,
+ units = "cm**-3")
+ if len(elements) == 0:
+ for element in ["H", "He"]:
+ registry.add_field((ftype, "%s_nuclei_density" % element),
+ function = _default_nuclei_density,
+ particle_type = particle_type,
+ units = "cm**-3")
+
+def _default_nuclei_density(field, data):
+ element = field.name[1][:field.name[1].find("_")]
+ return data["gas", "density"] * _primordial_mass_fraction[element] / \
+ ChemicalFormula(element).weight / amu_cgs
+
+def _nuclei_density(field, data):
+ element = field.name[1][:field.name[1].find("_")]
+ field_data = np.zeros_like(data["gas", "%s_number_density" %
+ data.pf.field_info.species_names[0]])
+ for species in data.pf.field_info.species_names:
+ nucleus = species
+ if "_" in species:
+ nucleus = species[:species.find("_")]
+ num = _get_element_multiple(nucleus, element)
+ field_data += num * data["gas", "%s_number_density" % species]
+ return field_data
+
+def _get_all_elements(species_list):
+ elements = []
+ for species in species_list:
+ for item in re.findall('[A-Z][a-z]?|[0-9]+', species):
+ if not item.isdigit() and item not in elements \
+ and item != "El":
+ elements.append(item)
+ return elements
+
+def _get_element_multiple(compound, element):
+ my_split = re.findall('[A-Z][a-z]?|[0-9]+', compound)
+ if element not in my_split:
+ return 0
+ loc = my_split.index(element)
+ if loc == len(my_split) - 1 or not my_split[loc + 1].isdigit():
+ return 1
+ return int(my_split[loc + 1])
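A quick standalone sanity check of the regex parsing above (the species names are hypothetical):

import re

species = "CO2_p1"                      # a hypothetical ionized species
nucleus = species[:species.find("_")]   # -> "CO2"
print re.findall('[A-Z][a-z]?|[0-9]+', nucleus)   # ['C', 'O', '2']
# With the helpers above:
#   _get_all_elements(["CO2_p1", "H2O"]) -> ['C', 'O', 'H']
#   _get_element_multiple("CO2", "O")    -> 2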
diff -r 44b9afc5fac8b0efa538e2c9662a7fdeda5f9451 -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -45,7 +45,9 @@
cm_per_mpc
from .fields import \
- BoxlibFieldInfo
+ BoxlibFieldInfo, \
+ MaestroFieldInfo
+
from .io import IOHandlerBoxlib
# This is what we use to find scientific notation that might include d's
# instead of e's.
@@ -457,21 +459,7 @@
elif param == "castro.use_comoving":
vals = self.cosmological_simulation = int(vals)
else:
- # Now we guess some things about the parameter and its type
- v = vals.split()[0] # Just in case there are multiple; we'll go
- # back afterward to using vals.
- try:
- float(v.upper().replace("D","E"))
- except:
- pcast = str
- else:
- syms = (".", "D+", "D-", "E+", "E-")
- if any(sym in v.upper() for sym in syms for v in vals.split()):
- pcast = float
- else:
- pcast = int
- vals = [pcast(v) for v in vals.split()]
- if len(vals) == 1: vals = vals[0]
+ vals = _guess_pcast(vals)
self.parameters[param] = vals
if getattr(self, "cosmological_simulation", 0) == 1:
@@ -759,6 +747,8 @@
class MaestroDataset(BoxlibDataset):
+ _field_info_class = MaestroFieldInfo
+
@classmethod
def _is_valid(cls, *args, **kwargs):
# fill our args
@@ -775,6 +765,19 @@
if any("maestro" in line.lower() for line in lines): return True
return False
+ def _parse_parameter_file(self):
+ super(MaestroDataset, self)._parse_parameter_file()
+ jobinfo_filename = os.path.join(self.output_dir, "job_info")
+ line = ""
+ with open(jobinfo_filename, "r") as f:
+ while not line.startswith(" [*] indicates overridden default"):
+ line = f.next()
+ for line in f:
+ p, v = (_.strip() for _ in line[4:].split("="))
+ if len(v) == 0:
+ self.parameters[p] = ""
+ else:
+ self.parameters[p] = _guess_pcast(v)
class NyxHierarchy(BoxlibHierarchy):
@@ -871,3 +874,23 @@
self.length_unit = YTQuantity(1.0 / (1 + self.current_redshift),
"mpc")
self.velocity_unit = self.length_unit / self.time_unit
+
+def _guess_pcast(vals):
+ # Now we guess some things about the parameter and its type
+ v = vals.split()[0] # Just in case there are multiple; we'll go
+ # back afterward to using vals.
+ try:
+ float(v.upper().replace("D","E"))
+ except:
+ pcast = str
+ if v in ("F", "T"):
+ pcast = bool
+ else:
+ syms = (".", "D+", "D-", "E+", "E-")
+ if any(sym in v.upper() for sym in syms for v in vals.split()):
+ pcast = float
+ else:
+ pcast = int
+ vals = [pcast(v) for v in vals.split()]
+ if len(vals) == 1: vals = vals[0]
+ return vals
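Some quick checks of the extracted helper's behavior, given the definition above (the parameter strings are hypothetical):

assert _guess_pcast("3.14") == 3.14            # "." triggers a float cast
assert _guess_pcast("64 64 64") == [64, 64, 64]
assert _guess_pcast("T") is True               # single flags become bools
assert _guess_pcast("plt0000") == "plt0000"    # unparsable values stay str
# Caveat: bool("F") is also True in Python, so "F" does not map to False.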
diff -r 44b9afc5fac8b0efa538e2c9662a7fdeda5f9451 -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -14,11 +14,16 @@
#-----------------------------------------------------------------------------
import numpy as np
+import string
from yt.utilities.physical_constants import \
mh, boltzmann_constant_cgs, amu_cgs
from yt.fields.field_info_container import \
FieldInfoContainer
+from yt.fields.species_fields import \
+ add_species_field_by_fraction
+from yt.utilities.chemical_formulas import \
+ ChemicalFormula
rho_units = "code_mass / code_length**3"
mom_units = "code_mass / (code_time * code_length**2)"
@@ -108,3 +113,90 @@
self.add_field(("gas", "velocity_%s" % ax),
function = _get_vel(ax),
units = "cm/s")
+
+class MaestroFieldInfo(FieldInfoContainer):
+
+ known_other_fields = (
+ ("density", ("g/cm**3", ["density"], None)),
+ ("x_vel", ("cm/s", ["velocity_x"], None)),
+ ("y_vel", ("cm/s", ["velocity_y"], None)),
+ ("z_vel", ("cm/s", ["velocity_z"], None)),
+ ("magvel", ("cm/s", ["velocity_magnitude"], None)),
+ ("tfromp", ("K", [], None)),
+ ("tfromh", ("K", [], None)),
+ ("Machnumber", ("", ["mach_number"], None)),
+ ("S", ("1/s", [], None)),
+ ("ad_excess", ("", [], "Adiabatic Excess")),
+ ("deltaT", ("", [], None)),
+ ("deltagamma", ("", [], None)),
+ ("deltap", ("", [], None)),
+ ("divw0", ("1/s", [], None)),
+ # Specific entropy
+ ("entropy", ("erg/(g*K)", ["entropy"], None)),
+ ("entropypert", ("", [], None)),
+ ("enucdot", ("ergs/(g*s)", [], None)),
+ ("gpi_x", ("dyne/cm**3", [], None)), # Perturbational pressure grad
+ ("gpi_y", ("dyne/cm**3", [], None)),
+ ("gpi_z", ("dyne/cm**3", [], None)),
+ ("h", ("erg/g", [], "Specific Enthalpy")),
+ ("h0", ("erg/g", [], "Base State Specific Enthalpy")),
+ # Momentum cannot be computed because we need to include base and
+ # full state.
+ ("momentum", ("g*cm/s", ["momentum_magnitude"], None)),
+ ("p0", ("erg/cm**3", [], "p_0")),
+ ("p0pluspi", ("erg/cm**3", [], "p_0 + \pi")),
+ ("pi", ("erg/cm**3", [], None)),
+ ("pioverp0", ("", [], "\pi/p_0")),
+ # Base state density
+ ("rho0", ("g/cm**3", [], "\\rho_0")),
+ ("rhoh", ("erg/cm**3", ["enthalpy_density"], "(\\rho h)")),
+ # Base state enthalpy density
+ ("rhoh0", ("erg/cm**3", [], "(\\rho h)_0")),
+ ("rhohpert", ("erg/cm**3", [], "(\\rho h)^\prime")),
+ ("rhopert", ("g/cm**3", [], "\\rho^\prime")),
+ ("soundspeed", ("cm/s", ["sound_speed"], None)),
+ ("sponge", ("", [], None)),
+ ("tpert", ("K", [], None)),
+ # Again, base state -- so we can't compute ourselves.
+ ("vort", ("1/s", ["vorticity_magnitude"], None)),
+ # Base state
+ ("w0_x", ("cm/s", [], None)),
+ ("w0_y", ("cm/s", [], None)),
+ ("w0_z", ("cm/s", [], None)),
+ )
+
+ def setup_fluid_fields(self):
+ # Add omegadots, units of 1/s
+ if self.pf.parameters["use_tfromp"]:
+ self.alias(("gas", "temperature"), ("boxlib", "tfromp"),
+ units = "K")
+ else:
+ self.alias(("gas", "temperature"), ("boxlib", "tfromh"),
+ units = "K")
+
+ for _, field in self.pf.field_list:
+ if field.startswith("X("):
+ # We have a fraction
+ nice_name = field[2:-1]
+ self.alias(("gas", "%s_fraction" % nice_name), ("boxlib", field),
+ units = "")
+ def _create_density_func(field_name):
+ def _func(field, data):
+ return data[field_name] * data["gas", "density"]
+ return _func
+ func = _create_density_func(("gas", "%s_fraction" % nice_name))
+ self.add_field(name = ("gas", "%s_density" % nice_name),
+ function = func,
+ units = "g/cm**3")
+ # We know this will either have one letter, or two.
+ if field[3] in string.letters:
+ element, weight = field[2:4], field[4:-1]
+ else:
+ element, weight = field[2:3], field[3:-1]
+ weight = int(weight)
+ # Here we can, later, add number density.
+ if field.startswith("omegadot("):
+ nice_name = field[9:-1]
+ self.add_output_field(("boxlib", field), units = "1/s")
+ self.alias(("gas", "%s_creation_rate" % nice_name),
+ ("boxlib", field), units = "1/s")
diff -r 44b9afc5fac8b0efa538e2c9662a7fdeda5f9451 -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -21,6 +21,7 @@
from yt.units.yt_array import \
YTArray
from yt.fields.species_fields import \
+ add_nuclei_density_fields, \
add_species_field_by_density
from yt.utilities.physical_constants import \
mh, me, mp, \
@@ -42,7 +43,7 @@
'HM' : 'H_m1',
'DI' : 'D',
'DII' : 'D_p1',
- 'HD' : 'HD',
+ 'HDI' : 'HD',
'Electron': 'El'
}
@@ -119,8 +120,10 @@
take_log=True,
units="code_mass/code_length**3")
yt_name = known_species_names[species]
- self.alias(("gas", "%s_density" % yt_name),
- ("enzo", "%s_Density" % species))
+ # don't alias electron density since mass is wrong
+ if species != "Electron":
+ self.alias(("gas", "%s_density" % yt_name),
+ ("enzo", "%s_Density" % species))
add_species_field_by_density(self, "gas", yt_name)
def setup_species_fields(self):
@@ -135,7 +138,8 @@
units = "g/cm**3")
for sp in species_names:
self.add_species_field(sp)
-
+ self.species_names.append(known_species_names[sp])
+ add_nuclei_density_fields(self, "gas")
def setup_fluid_fields(self):
# Now we conditionally load a few other things.
diff -r 44b9afc5fac8b0efa538e2c9662a7fdeda5f9451 -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -388,7 +388,7 @@
self.over_refine_factor = over_refine_factor
if field_dtypes is None:
field_dtypes = {}
- success, self.endian = self._validate_header(filename, field_dtypes)
+ success, self.endian = self._validate_header(filename)
if not success:
print "SOMETHING HAS GONE WRONG. NBODIES != SUM PARTICLES."
print "%s != (%s == %s + %s + %s)" % (
@@ -522,7 +522,7 @@
self.time_unit = 1.0 / np.sqrt(G * density_unit)
@staticmethod
- def _validate_header(filename, field_dtypes):
+ def _validate_header(filename):
'''
This method automatically detects whether the tipsy file is big/little endian
and is not corrupt/invalid. It returns a tuple of (Valid, endianswap) where
@@ -534,10 +534,11 @@
except:
return False, 1
try:
- fs = len(f.read())
+ f.seek(0, os.SEEK_END)
+ fs = f.tell()
+ f.seek(0, os.SEEK_SET)
except IOError:
return False, 1
- f.seek(0)
#Read in the header
t, n, ndim, ng, nd, ns = struct.unpack("<diiiii", f.read(28))
endianswap = "<"
@@ -546,16 +547,13 @@
endianswap = ">"
f.seek(0)
t, n, ndim, ng, nd, ns = struct.unpack(">diiiii", f.read(28))
- # Now we construct the sizes of each of the particles.
- dtypes = IOHandlerTipsyBinary._compute_dtypes(field_dtypes, endianswap)
- #Catch for 4 byte padding
- gas_size = dtypes["Gas"].itemsize
- dm_size = dtypes["DarkMatter"].itemsize
- star_size = dtypes["Stars"].itemsize
- if (fs == 32+gas_size*ng+dm_size*nd+star_size*ns):
- f.read(4)
- #File is borked if this is true
- elif (fs != 28+gas_size*ng+dm_size*nd+star_size*ns):
+ # File is borked if this is true. The header is 28 bytes, and may
+ # be followed by a 4 byte pad. Next come gas particles, which use
+ # 48 bytes, followed by 36 bytes per dark matter particle, and 44 bytes
+ # per star particle. If positions are stored as doubles, each of these
+ # sizes is increased by 12 bytes.
+ if (fs != 28+48*ng+36*nd+44*ns and fs != 28+60*ng+48*nd+56*ns and
+ fs != 32+48*ng+36*nd+44*ns and fs != 32+60*ng+48*nd+56*ns):
f.close()
return False, 0
f.close()
@@ -563,8 +561,7 @@
@classmethod
def _is_valid(self, *args, **kwargs):
- field_dtypes = kwargs.get("field_dtypes", {})
- return TipsyDataset._validate_header(args[0], field_dtypes)[0]
+ return TipsyDataset._validate_header(args[0])[0]
class HTTPParticleFile(ParticleFile):
pass
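The size check can be reproduced by hand: the header is unpacked as "<diiiii" (28 bytes, optionally padded to 32), so the four acceptable file sizes for given particle counts are:

ng, nd, ns = 1000, 2000, 10            # hypothetical particle counts
sizes = [pad + gas*ng + dm*nd + star*ns
         for pad in (28, 32)
         for (gas, dm, star) in ((48, 36, 44),    # float positions
                                 (60, 48, 56))]   # double positions
print sizes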
diff -r 44b9afc5fac8b0efa538e2c9662a7fdeda5f9451 -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -94,6 +94,7 @@
jansky_cgs = 1.0e-23
# Cosmological constants
rho_crit_g_cm3_h2 = 1.8788e-29
+primordial_H_mass_fraction = 0.76
# Misc. Approximations
mass_mean_atomic_cosmology = 1.22
diff -r 44b9afc5fac8b0efa538e2c9662a7fdeda5f9451 -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -294,8 +294,9 @@
order += [0, 4, 1, 5, 2, 6, 3, 7]
vertices = np.empty([corners.shape[2]*2*12,3])
+ vertices = self.pf.arr(vertices, "code_length")
for i in xrange(3):
- vertices[:,i] = corners[order,i,:].ravel(order='F')
+ vertices[:,i] = corners[order,i,...].ravel(order='F')
px, py, dz = self.project_to_plane(vertices, res=im.shape[:2])
@@ -477,8 +478,9 @@
order += [0, 4, 1, 5, 2, 6, 3, 7]
vertices = np.empty([24,3])
+ vertices = self.pf.arr(vertices, "code_length")
for i in xrange(3):
- vertices[:,i] = corners[order,i,:].ravel(order='F')
+ vertices[:,i] = corners[order,i,...].ravel(order='F')
px, py, dz = self.project_to_plane(vertices, res=im.shape[:2])
https://bitbucket.org/yt_analysis/yt/commits/2a18506ca026/
Changeset: 2a18506ca026
Branch: yt-3.0
User: ngoldbaum
Date: 2014-05-02 21:14:32
Summary: Merging with mainline.
Affected #: 126 files
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5160,3 +5160,4 @@
954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
079e456c38a87676472a458210077e2be325dc85 last_gplv3
+f327552a6ede406b82711fb800ebcd5fe692d1cb yt-3.0a4
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,4 +7,9 @@
include doc/extensions/README doc/Makefile
prune doc/source/reference/api/generated
prune doc/build/
+recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
+prune yt/frontends/_skeleton
+prune tests
+graft yt/gui/reason/html/resources
+exclude clean.sh .hgchurn
recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -6,6 +6,7 @@
.. toctree::
:maxdepth: 1
+
halo_catalogs
halo_finding
halo_mass_function
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -1,7 +1,7 @@
{
"metadata": {
"name": "",
- "signature": "sha256:52f186664831f5290b31ec433114927b9771e224bd79d0c82dd3d9a8d9c09bf6"
+ "signature": "sha256:5d881061b9e82bd9df5d3598983c8ddc5fbec35e3bf7ae4524430dc558e27489"
},
"nbformat": 3,
"nbformat_minor": 0,
@@ -307,7 +307,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`:"
+ "For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`. You can use these arrays to create dummy arrays with the same units as another array - this is sometimes easier than manually creating a new array or quantity."
]
},
{
@@ -402,11 +402,13 @@
"\n",
"print a/b\n",
"print (a/b).in_cgs()\n",
+ "print (a/b).in_mks()\n",
"print (a/b).in_units('km/s')\n",
"print ''\n",
"\n",
"print a*b\n",
- "print (a*b).in_cgs()"
+ "print (a*b).in_cgs()\n",
+ "print (a*b).in_mks()"
],
"language": "python",
"metadata": {},
@@ -433,7 +435,10 @@
"from yt.utilities.physical_constants import G, kboltz\n",
"\n",
"print \"Newton's constant: \", G\n",
- "print \"Boltzmann constant: \", kboltz"
+ "print \"Newton's constant in MKS: \", G.in_mks(), \"\\n\"\n",
+ "\n",
+ "print \"Boltzmann constant: \", kboltz\n",
+ "print \"Boltzmann constant in MKS: \", kboltz.in_mks()"
],
"language": "python",
"metadata": {},
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
--- a/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
+++ b/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
@@ -1,7 +1,7 @@
{
"metadata": {
"name": "",
- "signature": "sha256:8e1a5db9e3869bcf761ff39c5a95d21458b7c4205f00da3d3f973d398422a466"
+ "signature": "sha256:9e7ac626b3609cf5f3fb2d4ebc6e027ed923ab1c22f0acc212e42fc7535e3205"
},
"nbformat": 3,
"nbformat_minor": 0,
@@ -73,6 +73,7 @@
"mass = dd['cell_mass']\n",
"\n",
"print \"Cell Masses in CGS: \\n\", mass, \"\\n\"\n",
+ "print \"Cell Masses in MKS: \\n\", mass.in_mks(), \"\\n\"\n",
"print \"Cell Masses in Solar Masses: \\n\", mass.in_units('Msun'), \"\\n\"\n",
"print \"Cell Masses in code units: \\n\", mass.in_units('code_mass'), \"\\n\""
],
@@ -87,6 +88,7 @@
"dx = dd['dx']\n",
"print \"Cell dx in code units: \\n\", dx, \"\\n\"\n",
"print \"Cell dx in centimeters: \\n\", dx.in_cgs(), \"\\n\"\n",
+ "print \"Cell dx in meters: \\n\", dx.in_units('m'), \"\\n\"\n",
"print \"Cell dx in megaparsecs: \\n\", dx.in_units('Mpc'), \"\\n\""
],
"language": "python",
@@ -109,8 +111,10 @@
"\n",
"* `in_units`\n",
"* `in_cgs`\n",
+ "* `in_mks`\n",
"* `convert_to_units`\n",
- "* `convert_to_cgs`"
+ "* `convert_to_cgs`\n",
+ "* `convert_to_mks`"
]
},
{
@@ -134,15 +138,16 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "The second, `in_cgs`, returns a copy of the array converted into the base units of yt's CGS unit system:"
+ "`in_cgs` and `in_mks` return a copy of the array converted CGS and MKS units, respectively:"
]
},
{
"cell_type": "code",
"collapsed": false,
"input": [
- "print (dd['pressure']/dd['density'])\n",
- "print (dd['pressure']/dd['density']).in_cgs()"
+ "print (dd['pressure'])\n",
+ "print (dd['pressure']).in_cgs()\n",
+ "print (dd['pressure']).in_mks()"
],
"language": "python",
"metadata": {},
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
--- a/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
+++ b/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
@@ -1,7 +1,7 @@
{
"metadata": {
"name": "",
- "signature": "sha256:a07224c25b1d938bc1014b6d9d09c1a2392912f21b821b07615e65302677ef9b"
+ "signature": "sha256:242d7005d45a82744713bfe6389e49d47f39b524d1e7fcbf5ceb2e65dc473e68"
},
"nbformat": 3,
"nbformat_minor": 0,
@@ -20,77 +20,6 @@
"level": 3,
"metadata": {},
"source": [
- "The unit registry"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "When a dataset is loaded, we attempt to detect and assign conversion factors from the internal simulation coordinate system and the physical CGS system. These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols:"
- ]
- },
- {
- "cell_type": "code",
- "collapsed": false,
- "input": [
- "from yt.mods import *\n",
- "\n",
- "ds = load('Enzo_64/DD0043/data0043')\n",
- "\n",
- "ds.unit_registry"
- ],
- "language": "python",
- "metadata": {},
- "outputs": []
- },
- {
- "cell_type": "code",
- "collapsed": false,
- "input": [
- "ds.unit_registry.lut"
- ],
- "language": "python",
- "metadata": {},
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "\n",
- "It is not necessary to specify a unit registry when creating a new `YTArray` or `YTQuantity` since `yt` ships with a default unit registry:"
- ]
- },
- {
- "cell_type": "code",
- "collapsed": false,
- "input": [
- "from yt.units.unit_object import default_unit_registry as reg\n",
- "\n",
- "unit_names = reg.lut.keys()\n",
- "unit_names.sort()\n",
- "\n",
- "# Print out the first 10 unit names\n",
- "for i in range(10):\n",
- " print unit_names[i], reg.lut[unit_names[i]]"
- ],
- "language": "python",
- "metadata": {},
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Each entry in the lookup table is the string name of a base unit and a tuple containing the CGS conversion factor and dimensions of the unit symbol."
- ]
- },
- {
- "cell_type": "heading",
- "level": 3,
- "metadata": {},
- "source": [
"Code units"
]
},
@@ -98,25 +27,6 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Some of the most interesting unit symbols are the ones for \"code\" units:"
- ]
- },
- {
- "cell_type": "code",
- "collapsed": false,
- "input": [
- "code_unit_names = [un for un in unit_names if 'code_' in un]\n",
- "\n",
- "print code_unit_names"
- ],
- "language": "python",
- "metadata": {},
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
"Let's take a look at a cosmological enzo dataset to play with converting between physical units and code units:"
]
},
@@ -132,13 +42,22 @@
"outputs": []
},
{
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The conversion factors between Enzo's internal unit system and the physical CGS system are stored in the dataset's `unit_registry` object. Code units have names like `code_length` and `code_time`. Let's take a look at the names of all of the code units, along with their CGS conversion factors for this cosmological enzo dataset:"
+ ]
+ },
+ {
"cell_type": "code",
"collapsed": false,
"input": [
"reg = ds.unit_registry\n",
"\n",
- "for un in code_unit_names:\n",
- " print un, reg.lut[un]"
+ "for un in reg.keys():\n",
+ " if un.startswith('code_'):\n",
+ " fmt_tup = (un, reg.lut[un][0], reg.lut[un][1])\n",
+ " print \"Unit name: {:<15}\\nCGS conversion: {:<15}\\nDimensions: {:<15}\\n\".format(*fmt_tup)"
],
"language": "python",
"metadata": {},
@@ -295,6 +214,95 @@
"language": "python",
"metadata": {},
"outputs": []
+ },
+ {
+ "cell_type": "heading",
+ "level": 3,
+ "metadata": {},
+ "source": [
+ "The unit registry"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "When you create a `YTArray` without referring to a unit registry, `yt` uses the default unit registry, which does not include code units or comoving units."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "a = YTQuantity(3, 'cm')\n",
+ "\n",
+ "print a.units.registry.keys()"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "When a dataset is loaded, `yt` infers conversion factors from the internal simulation unit system to the CGS unit system. These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols. For the cosmological Enzo dataset we loaded earlier, we can see there are a number of additional unit symbols not defined in the default unit lookup table:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "print sorted([k for k in ds.unit_registry.keys() if k not in a.units.registry.keys()])"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Since code units do not appear in the default unit symbol lookup table, one must explicitly refer to a unit registry when creating a `YTArray` to be able to convert to the unit system of a simulation."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To make this as clean as possible, there are array and quantity-creating convenience functions attached to the `Dataset` object:\n",
+ "\n",
+ "* `ds.arr()`\n",
+ "* `ds.quan()`\n",
+ "\n",
+ "These functions make it straightforward to create arrays and quantities that can be converted to code units or comoving units. For example:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "a = ds.quan(3, 'code_length')\n",
+ "\n",
+ "print a\n",
+ "print a.in_cgs()\n",
+ "print a.in_units('Mpccm/h')"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "b = ds.arr([3, 4, 5], 'Mpccm/h')\n",
+ "print b\n",
+ "print b.in_cgs()"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
}
],
"metadata": {}
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -12,7 +12,7 @@
computation engine, Matplotlib for some visualization tasks and Mercurial for
version control. Because installation of all of these interlocking parts can
be time-consuming, yt provides an installation script which downloads and builds
-a fully-isolated Python + Numpy + Matplotlib + HDF5 + Mercurial installation.
+a fully-isolated Python + NumPy + Matplotlib + HDF5 + Mercurial installation.
yt supports Linux and OSX deployment, with the possibility of deployment on
other Unix-like systems (XSEDE resources, clusters, etc.). Windows is not
supported.
@@ -86,16 +86,41 @@
Alternative Installation Methods
--------------------------------
-If you want to forego the use of the install script, you need to make sure
-you have yt's dependencies installed on your system. These include: a C compiler,
-``HDF5``, ``Freetype``, ``libpng``, ``python``, ``cython``, ``numpy``, and
-``matplotlib``. From here, you can use ``pip`` (which comes with ``Python``)
-to install yt as:
+If you want to forego the use of the install script, you need to make sure you
+have yt's dependencies installed on your system. These include: a C compiler,
+``HDF5``, ``Freetype``, ``libpng``, ``python``, ``cython``, ``NumPy``, and
+``matplotlib``. From here, you can use ``pip`` (which comes with ``Python``) to
+install yt as:
.. code-block:: bash
$ pip install yt
+The source code for yt may be found at the Bitbucket project site and can also be
+used for installation. If you prefer to install from source rather than relying on
+the install script, you will need ``mercurial`` to clone the official repo:
+
+.. code-block:: bash
+
+ $ hg clone https://bitbucket.org/yt_analysis/yt
+ $ cd yt
+ $ hg update yt
+ $ python setup.py install --user
+
+This will install yt into ``$HOME/.local/lib64/python2.7/site-packages``.
+Please refer to the ``setuptools`` documentation for additional options.
+
+Provided that the required dependencies are in a predictable location, yt should
+be able to find them automatically. However, you can manually specify the prefixes
+used for the installation of ``HDF5``, ``Freetype`` and ``libpng`` via ``hdf5.cfg``,
+``freetype.cfg`` and ``png.cfg``, or by setting the ``HDF5_DIR``, ``FTYPE_DIR`` and
+``PNG_DIR`` environment variables, respectively, e.g.
+
+.. code-block:: bash
+
+ $ echo '/usr/local' > hdf5.cfg
+ $ export FTYPE_DIR=/opt/freetype
+
If you choose this installation method, you do not need to run the activation
script as it is unnecessary.
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -660,7 +660,6 @@
~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_passthrough
~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_root_only
~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_simple_proxy
- ~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_splitter
Math Utilities
--------------
@@ -709,8 +708,6 @@
~yt.utilities.parallel_tools.parallel_analysis_interface.ObjectIterator
~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelAnalysisInterface
~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelObjectIterator
- ~yt.analysis_modules.hierarchy_subset.hierarchy_subset.ConstructedRootGrid
- ~yt.analysis_modules.hierarchy_subset.hierarchy_subset.ExtractedHierarchy
Testing Infrastructure
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -104,42 +104,31 @@
-------------
-.. function:: annotate_hop_circles(self, hop_output, max_number=None, annotate=False, min_size=20, max_size=10000000, font_size=8, print_halo_size=False, print_halo_mass=False, width=None):
+.. function:: annotate_halos(self, halo_catalog, col='white', alpha=1, width=None):
- (This is a proxy for :class:`~yt.visualization.plot_modifications.HopCircleCallback`.)
+ (This is a proxy for :class:`~yt.visualization.plot_modifications.HaloCatalogCallback`.)
- Accepts a :class:`yt.HopList` *hop_output* and plots up
- to *max_number* (None for unlimited) halos as circles.
+ Accepts a :class:`yt.HaloCatalog` *halo_catalog* and plots
+ a circle at the location of each halo, with the radius of
+ the circle corresponding to the virial radius of the halo.
+ If *width* is set to None (the default), all halos are plotted;
+ otherwise, only halos that fall within a slab of width
+ *width* centered on the center of the plot data are plotted. The
+ color and transparency of the circles can be controlled with
+ *col* and *alpha*, respectively.
.. python-script::
+
+ from yt.mods import *
+ data_pf = load('Enzo_64/RD0006/RD0006')
+ halos_pf = load('rockstar_halos/halos_0.0.bin')
- from yt.mods import *
- pf = load("Enzo_64/DD0043/data0043")
- halos = HaloFinder(pf)
- p = ProjectionPlot(pf, "z", "density")
- p.annotate_hop_circles(halos)
- p.save()
+ hc = HaloCatalog(halos_pf=halos_pf)
+ hc.create()
--------------
-
-.. function:: annotate_hop_particles(self, hop_output, max_number, p_size=1.0, min_size=20, alpha=0.2):
-
- (This is a proxy for :class:`~yt.visualization.plot_modifications.HopParticleCallback`.)
-
- Adds particle positions for the members of each halo as
- identified by HOP. Along *axis* up to *max_number* groups
- in *hop_output* that are larger than *min_size* are
- plotted with *p_size* pixels per particle; *alpha*
- determines the opacity of each particle.
-
-.. python-script::
-
- from yt.mods import *
- pf = load("Enzo_64/DD0043/data0043")
- halos = HaloFinder(pf)
- p = ProjectionPlot(pf, "x", "density", center='m', width=(10, 'Mpc'))
- p.annotate_hop_particles(halos, max_number=100, p_size=5.0)
- p.save()
+ prj = ProjectionPlot(data_pf, 'z', 'density')
+ prj.annotate_halos(hc)
+ prj.save()
-------------
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -154,6 +154,9 @@
from yt.convenience import \
load, simulation
+from yt.testing import \
+ run_nose
+
# Import some helpful math utilities
from yt.utilities.math_utils import \
ortho_find, quartiles, periodic_position
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -4,10 +4,9 @@
from yt.analysis_modules.absorption_spectrum.absorption_line \
import voigt
-
def generate_total_fit(x, fluxData, orderFits, speciesDicts,
- minError=1E-5, complexLim=.999,
- fitLim=.99, minLength=3,
+ minError=1E-4, complexLim=.995,
+ fitLim=.97, minLength=3,
maxLength=1000, splitLim=.99,
output_file=None):
@@ -90,6 +89,7 @@
fluxData[0]=1
fluxData[-1]=1
+
#Find all regions where lines/groups of lines are present
cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
complexLim=complexLim, minLength=minLength,
@@ -111,6 +111,7 @@
yDatBounded=fluxData[b[1]:b[2]]
yFitBounded=yFit[b[1]:b[2]]
+
#Find init redshift
z=(xBounded[yDatBounded.argmin()]-initWl)/initWl
@@ -121,24 +122,33 @@
#Fit Using complex tools
newLinesP,flag=_complex_fit(xBounded,yDatBounded,yFitBounded,
- z,fitLim,minError*(b[2]-b[1]),speciesDict)
+ z,fitLim,minError,speciesDict)
+
+ #If flagged as a bad fit, species is lyman alpha,
+ # and it may be a saturated line, use special tools
+ if flag and species=='lya' and min(yDatBounded)<.1:
+ newLinesP=_large_flag_fit(xBounded,yDatBounded,
+ yFitBounded,z,speciesDict,
+ minSize,minError)
+
+ if na.size(newLinesP)> 0:
+
+ #Check for EXPLOOOOSIIONNNSSS
+ newLinesP = _check_numerical_instability(x, newLinesP, speciesDict,b)
+
#Check existence of partner lines if applicable
if len(speciesDict['wavelength']) != 1:
newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData,
- b, minError*(b[2]-b[1]),
- x0, xRes, speciesDict)
+ b, minError, x0, xRes, speciesDict)
- #If flagged as a bad fit, species is lyman alpha,
- # and it may be a saturated line, use special tools
- if flag and species=='lya' and min(yDatBounded)<.1:
- newLinesP=_large_flag_fit(xBounded,yDatBounded,
- yFitBounded,z,speciesDict,
- minSize,minError*(b[2]-b[1]))
+
+
#Adjust total current fit
yFit=yFit*_gen_flux_lines(x,newLinesP,speciesDict)
+
#Add new group to all fitted lines
if na.size(newLinesP)>0:
speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
@@ -149,6 +159,7 @@
allSpeciesLines[species]=speciesLines
+
if output_file:
_output_fit(allSpeciesLines, output_file)
@@ -205,10 +216,12 @@
#Setup initial line guesses
if initP==None: #Regular fit
initP = [0,0,0]
- if min(yDat)<.5: #Large lines get larger initial guess
- initP[0] = 10**16
+ if min(yDat)<.01: #Large lines get larger initial guess
+ initP[0] = speciesDict['init_N']*10**2
+ elif min(yDat)<.5:
+ initP[0] = speciesDict['init_N']*10**1
elif min(yDat)>.9: #Small lines get smaller initial guess
- initP[0] = 10**12.5
+ initP[0] = speciesDict['init_N']*10**-1
else:
initP[0] = speciesDict['init_N']
initP[1] = speciesDict['init_b']
@@ -225,9 +238,16 @@
return [],False
#Values to proceed through first run
- errSq,prevErrSq=1,1000
+ errSq,prevErrSq,prevLinesP=1,10*len(x),[]
+ if errBound == None:
+ errBound = len(yDat)*(max(1-yDat)*1E-2)**2
+ else:
+ errBound = errBound*len(yDat)
+
+ flag = False
while True:
+
#Initial parameter guess from joining parameters from all lines
# in lines into a single array
initP = linesP.flatten()
@@ -237,6 +257,7 @@
args=(x,yDat,yFit,speciesDict),
epsfcn=1E-10,maxfev=1000)
+
#Set results of optimization
linesP = na.reshape(fitP,(-1,3))
@@ -247,17 +268,23 @@
#Sum to get idea of goodness of fit
errSq=sum(dif**2)
+ if any(linesP[:,1]==speciesDict['init_b']):
+ # linesP = prevLinesP
+
+ flag = True
+ break
+
#If good enough, break
- if errSq < errBound:
+ if errSq < errBound:
break
#If last fit was worse, reject the last line and revert to last fit
- if errSq > prevErrSq*10:
+ if errSq > prevErrSq*10 :
#If its still pretty damn bad, cut losses and try flag fit tools
if prevErrSq >1E2*errBound and speciesDict['name']=='HI lya':
return [],True
else:
- yNewFit=_gen_flux_lines(x,prevLinesP,speciesDict)
+ linesP = prevLinesP
break
#If too many lines
@@ -266,21 +293,26 @@
if errSq >1E2*errBound and speciesDict['name']=='HI lya':
return [],True
else:
- break
+ flag = True
+ break
#Store previous data in case reject next fit
prevErrSq = errSq
prevLinesP = linesP
-
#Set up initial condition for new line
newP = [0,0,0]
- if min(dif)<.1:
- newP[0]=10**12
- elif min(dif)>.9:
- newP[0]=10**16
+
+ yAdjusted = 1+yFit*yNewFit-yDat
+
+ if min(yAdjusted)<.01: #Large lines get larger initial guess
+ newP[0] = speciesDict['init_N']*10**2
+ elif min(yAdjusted)<.5:
+ newP[0] = speciesDict['init_N']*10**1
+ elif min(yAdjusted)>.9: #Small lines get smaller initial guess
+ newP[0] = speciesDict['init_N']*10**-1
else:
- newP[0]=10**14
+ newP[0] = speciesDict['init_N']
newP[1] = speciesDict['init_b']
newP[2]=(x[dif.argmax()]-wl0)/wl0
linesP=na.append(linesP,[newP],axis=0)
@@ -290,12 +322,12 @@
# acceptable range, as given in dict ref
remove=[]
for i,p in enumerate(linesP):
- check=_check_params(na.array([p]),speciesDict)
+ check=_check_params(na.array([p]),speciesDict,x)
if check:
remove.append(i)
linesP = na.delete(linesP,remove,axis=0)
- return linesP,False
+ return linesP,flag
def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound):
"""
@@ -489,6 +521,9 @@
#List of lines to remove
removeLines=[]
+ #Set error
+
+
#Iterate through all sets of line parameters
for i,p in enumerate(linesP):
@@ -501,16 +536,23 @@
lb = _get_bounds(p[2],b,wl,x0,xRes)
xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]]
+ if errBound == None:
+ errBound = 10*len(yb)*(max(1-yb)*1E-2)**2
+ else:
+ errBound = 10*errBound*len(yb)
+
#Generate a fit and find the difference to data
yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
dif =yb-yFitb
+
+
#Only counts as an error if line is too big ---------------<
dif = [k for k in dif if k>0]
err = sum(dif)
#If the fit is too bad then add the line to list of removed lines
- if err > errBound*1E2:
+ if err > errBound:
removeLines.append(i)
break
@@ -640,21 +682,13 @@
#Check if the region needs to be divided
if b[2]-b[1]>maxLength:
- #Find the minimum absorption in the middle two quartiles of
- # the large complex
- q=(b[2]-b[1])/4
- cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+ split = _split_region(yDat,b,splitLim)
- #Only break it up if the minimum absorption is actually low enough
- if yDat[cut]>splitLim:
-
- #Get the new two peaks
- b1Peak = yDat[b[1]:cut].argmin()+b[1]
- b2Peak = yDat[cut:b[2]].argmin()+cut
+ if split:
#add the two regions separately
- cBounds.insert(i+1,[b1Peak,b[1],cut])
- cBounds.insert(i+2,[b2Peak,cut,b[2]])
+ cBounds.insert(i+1,split[0])
+ cBounds.insert(i+2,split[1])
#Remove the original region
cBounds.pop(i)
@@ -663,7 +697,33 @@
return cBounds
-def _gen_flux_lines(x, linesP, speciesDict):
+
+def _split_region(yDat,b,splitLim):
+ #Find the minimum absorption in the middle two quartiles of
+ # the large complex
+
+ q=(b[2]-b[1])/4
+ cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+
+ #Only break it up if the minimum absorption is actually low enough
+ if yDat[cut]>splitLim:
+
+ #Get the new two peaks
+ b1Peak = yDat[b[1]:cut].argmin()+b[1]
+ b2Peak = yDat[cut:b[2]].argmin()+cut
+
+ region_1 = [b1Peak,b[1],cut]
+ region_2 = [b2Peak,cut,b[2]]
+
+ return [region_1,region_2]
+
+ else:
+
+ return []
+
+
+
+def _gen_flux_lines(x, linesP, speciesDict,firstLine=False):
"""
Calculates the normalized flux for a region of wavelength space
generated by a set of absorption lines.
@@ -692,6 +752,9 @@
g=speciesDict['Gamma'][i]
wl=speciesDict['wavelength'][i]
y = y+ _gen_tau(x,p,f,g,wl)
+ if firstLine:
+ break
+
flux = na.exp(-y)
return flux
@@ -744,21 +807,25 @@
the difference between the fit generated by the parameters
given in pTotal multiplied by the previous fit and the desired
flux profile, w/ first index modified appropriately for bad
- parameter choices
+ parameter choices and additional penalty for fitting with a lower
+ flux than observed.
"""
pTotal.shape = (-1,3)
yNewFit = _gen_flux_lines(x,pTotal,speciesDict)
error = yDat-yFit*yNewFit
- error[0] = _check_params(pTotal,speciesDict)
+ error_plus = (yDat-yFit*yNewFit).clip(min=0)
+
+ error = error+error_plus
+ error[0] = _check_params(pTotal,speciesDict,x)
return error
-def _check_params(p, speciesDict):
+def _check_params(p, speciesDict,xb):
"""
Check to see if any of the parameters in p fall outside the range
- given in speciesDict.
+ given in speciesDict or on the boundaries
Parameters
----------
@@ -767,6 +834,8 @@
speciesDict : dictionary
dictionary with properties giving the max and min
values appropriate for each parameter N,b, and z.
+ xb : (N) ndarray
+ wavelength array [nm]
Returns
-------
@@ -774,16 +843,137 @@
0 if all values are fine
999 if any values fall outside acceptable range
"""
+
+ minz = (xb[0])/speciesDict['wavelength'][0]-1
+ maxz = (xb[-1])/speciesDict['wavelength'][0]-1
+
check = 0
- if any(p[:,0] > speciesDict['maxN']) or\
- any(p[:,0] < speciesDict['minN']) or\
- any(p[:,1] > speciesDict['maxb']) or\
- any(p[:,1] < speciesDict['minb']) or\
- any(p[:,2] > speciesDict['maxz']) or\
- any(p[:,2] < speciesDict['minz']):
+ if any(p[:,0] >= speciesDict['maxN']) or\
+ any(p[:,0] <= speciesDict['minN']) or\
+ any(p[:,1] >= speciesDict['maxb']) or\
+ any(p[:,1] <= speciesDict['minb']) or\
+ any(p[:,2] >= maxz) or\
+ any(p[:,2] <= minz):
check = 999
+
return check
+def _check_optimization_init(p,speciesDict,initz,xb,yDat,yFit,minSize,errorBound):
+
+ """
+ Check to see if any of the parameters in p are the
+ same as the initial parameters and, if so, attempt to
+ split the region and refit it.
+
+ Parameters
+ ----------
+ p : (3,) ndarray
+ array with form [[N1, b1, z1], ...]
+ speciesDict : dictionary
+ dictionary with properties giving the max and min
+ values appropriate for each parameter N,b, and z.
+ x : (N) ndarray
+ wavelength array [nm]
+ """
+
+ # Check if anything is a default parameter
+ if any(p[:,0] == speciesDict['init_N']) or\
+ any(p[:,0] == speciesDict['init_N']*10) or\
+ any(p[:,0] == speciesDict['init_N']*100) or\
+ any(p[:,0] == speciesDict['init_N']*.1) or\
+ any(p[:,1] == speciesDict['init_b']) or\
+ any(p[:,1] == speciesDict['maxb']):
+
+ # These are the initial bounds
+ init_bounds = [yDat.argmin(),0,len(xb)-1]
+
+ # Gratuitous limit for splitting the region
+ newSplitLim = 1 - (1-min(yDat))*.5
+
+ # Attempt to split region
+ split = _split_region(yDat,init_bounds,newSplitLim)
+
+ # If we can't split it, just reject it. It's unphysical
+ # to just keep the default parameters and we're out of
+ # options at this point
+ if not split:
+ return []
+
+ # Else set up the bounds for each region and fit separately
+ b1,b2 = split[0][2], split[1][1]
+
+ p1,flag = _complex_fit(xb[:b1], yDat[:b1], yFit[:b1],
+ initz, minSize, errorBound, speciesDict)
+
+ p2,flag = _complex_fit(xb[b2:], yDat[b2:], yFit[b2:],
+ initz, minSize, errorBound, speciesDict)
+
+ # Make the final line parameters. It's annoying because
+ # one or both regions may have fit to nothing
+ if na.size(p1)> 0 and na.size(p2)>0:
+ p = na.r_[p1,p2]
+ elif na.size(p1) > 0:
+ p = p1
+ else:
+ p = p2
+
+ return p
+
+
+def _check_numerical_instability(x, p, speciesDict,b):
+
+ """
+ Check to see if any of the parameters in p are causing
+ unstable numerical effects outside the region of fit
+
+ Parameters
+ ----------
+ p : (3,) ndarray
+ array with form [[N1, b1, z1], ...]
+ speciesDict : dictionary
+ dictionary with properties giving the max and min
+ values appropriate for each parameter N,b, and z.
+ x : (N) ndarray
+ wavelength array [nm]
+ b : (3) list
+ list of integers indicating bounds of region fit in x
+ """
+
+ remove_lines = []
+
+
+ for i,line in enumerate(p):
+
+ # First, check if the line is at risk of instability
+ if line[1]<5 or line[0] < 1E12:
+
+
+ # get all flux that isn't part of fit plus a little wiggle room
+ # max and min to prevent boundary errors
+
+ flux = _gen_flux_lines(x,[line],speciesDict,firstLine=True)
+ flux = na.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
+
+ #Find regions that are absorbing outside the region we fit
+ flux_dif = 1 - flux
+ absorbing_coefficient = max(abs(flux_dif))
+
+
+ #Really there shouldn't be any absorption outside
+ #the region we fit, but we'll give some leeway.
+ #for high resolution spectra the tiny bits on the edges
+ #can give a non negligible amount of flux. Plus the errors
+ #we are looking for are HUGE.
+ if absorbing_coefficient > .1:
+
+ # we just set it to no fit because we've tried
+ # everything else at this point. this region just sucks :(
+ remove_lines.append(i)
+
+ if remove_lines:
+ p = na.delete(p, remove_lines, axis=0)
+
+ return p
def _output_fit(lineDic, file_name = 'spectrum_fit.h5'):
"""
@@ -815,4 +1005,5 @@
f.create_dataset("{0}/z".format(ion),data=params['z'])
f.create_dataset("{0}/complex".format(ion),data=params['group#'])
print 'Writing spectrum fit to {0}'.format(file_name)
+ f.close()
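For orientation, the model optimized throughout this file is a set of Voigt-profile lines applied to a unit continuum, i.e. flux(x) = exp(-sum_i tau_i(x)), exactly as _gen_flux_lines accumulates above; a trivial standalone sketch:

import numpy as np

def toy_flux(taus):
    # taus: (n_lines, n_wavelengths) array of per-line optical depths
    return np.exp(-np.sum(taus, axis=0))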
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -63,12 +63,6 @@
HaloProfiler, \
FakeProfile
-from .hierarchy_subset.api import \
- ConstructedRootGrid, \
- AMRExtractedGridProxy, \
- ExtractedHierarchy, \
- ExtractedParameterFile
-
from .level_sets.api import \
identify_contours, \
Clump, \
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 yt/analysis_modules/halo_analysis/api.py
--- a/yt/analysis_modules/halo_analysis/api.py
+++ b/yt/analysis_modules/halo_analysis/api.py
@@ -20,6 +20,9 @@
from .halo_callbacks import \
add_callback
+from .halo_finding_methods import \
+ add_finding_method
+
from .halo_filters import \
add_filter
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 yt/analysis_modules/halo_analysis/finding_methods.py
--- a/yt/analysis_modules/halo_analysis/finding_methods.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-Halo Finding methods
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .operator_registry import \
- hf_registry
-
-class HaloFindingMethod(object):
- pass
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -30,20 +30,9 @@
from .operator_registry import \
callback_registry, \
filter_registry, \
- hf_registry, \
+ finding_method_registry, \
quantity_registry
-from yt.analysis_modules.halo_finding.halo_objects import \
- FOFHaloFinder, HOPHaloFinder
-from yt.frontends.halo_catalogs.halo_catalog.data_structures import \
- HaloCatalogDataset
-from yt.frontends.stream.data_structures import \
- load_particles
-from yt.frontends.halo_catalogs.rockstar.data_structures import \
- RockstarDataset
-from yt.analysis_modules.halo_finding.rockstar.api import \
- RockstarHaloFinder
-
class HaloCatalog(ParallelAnalysisInterface):
r"""Create a HaloCatalog: an object that allows for the creation and association
of data with a set of halo objects.
@@ -103,7 +92,7 @@
See Also
--------
- add_callback, add_filter, add_quantity
+ add_callback, add_filter, add_finding_method, add_quantity
"""
@@ -113,7 +102,6 @@
ParallelAnalysisInterface.__init__(self)
self.halos_pf = halos_pf
self.data_pf = data_pf
- self.finder_method = finder_method
self.output_dir = ensure_dir(output_dir)
if os.path.basename(self.output_dir) != ".":
self.output_prefix = os.path.basename(self.output_dir)
@@ -133,6 +121,10 @@
data_source = data_pf.h.all_data()
self.data_source = data_source
+ if finder_method is not None:
+ finder_method = finding_method_registry.find(finder_method)
+ self.finder_method = finder_method
+
# all of the analysis actions to be performed: callbacks, filters, and quantities
self.actions = []
# fields to be written to the halo catalog
@@ -358,16 +350,22 @@
if self.halos_pf is None:
# Find the halos and make a dataset of them
- particles_pf = self.find_halos()
+ self.halos_pf = self.finder_method(self.data_pf)
+ if self.halos_pf is None:
+ mylog.warning('No halos were found for {0}'.format(\
+ self.data_pf.basename))
+ if save_catalog:
+ self.halos_pf = self.data_pf
+ self.save_catalog()
+ self.halos_pf = None
+ return
# Assign pf and data sources appropriately
- self.halos_pf = particles_pf
- self.data_source = particles_pf.all_data()
+ self.data_source = self.halos_pf.all_data()
# Add all of the default quantities that all halos must have
self.add_default_quantities('all')
-
my_index = np.argsort(self.data_source["particle_identifier"])
for i in parallel_objects(my_index, njobs=njobs, dynamic=dynamic):
new_halo = Halo(self)
@@ -400,80 +398,6 @@
if save_catalog:
self.save_catalog()
- def find_halos(self):
-
- finder_method = (self.finder_method).lower()
-
- if finder_method == "hop":
- halo_list = HOPHaloFinder(self.data_pf)
- halos_pf = self._parse_old_halo_list(halo_list)
-
- elif finder_method == "fof":
- halo_list = FOFHaloFinder(self.data_pf)
- halos_pf = self._parse_old_halo_list(halo_list)
-
- elif finder_method == 'rockstar':
- rh = RockstarHaloFinder(self.data_pf,
- outbase='{0}/rockstar_halos'.format(self.output_prefix))
- rh.run()
- halos_pf = RockstarDataset('{0}/rockstar_halos/halos_0.0.bin'.format(self.output_prefix))
- halos_pf.create_field_info()
- else:
- raise RuntimeError("finder_method must be 'fof', 'hop', or 'rockstar'")
-
- for attr in ["current_redshift", "current_time",
- "domain_dimensions",
- "cosmological_simulation", "omega_lambda",
- "omega_matter", "hubble_constant"]:
- attr_val = getattr(self.data_pf, attr)
- setattr(halos_pf, attr, attr_val)
- halos_pf.current_time = halos_pf.current_time.in_cgs()
-
- return halos_pf
-
- def _parse_old_halo_list(self, halo_list):
-
-
- data_pf = self.data_pf
- num_halos = len(halo_list)
-
- # Set up fields that we want to pull from identified halos and their units
- new_fields = ['particle_identifier', 'particle_mass', 'particle_position_x',
- 'particle_position_y','particle_position_z',
- 'virial_radius']
- new_units = [ '', 'g', 'cm', 'cm','cm','cm']
-
- # Set up a dictionary based on those fields
- # with empty arrays where we will fill in their values
- halo_properties = { f : (np.zeros(num_halos),unit) \
- for f, unit in zip(new_fields,new_units)}
-
- # Iterate through the halos pulling out their positions and virial quantities
- # and filling in the properties dictionary
- for i,halo in enumerate(halo_list):
- halo_properties['particle_identifier'][0][i] = i
- halo_properties['particle_mass'][0][i] = halo.virial_mass().in_cgs()
- halo_properties['virial_radius'][0][i] = halo.virial_radius().in_cgs()
-
- com = halo.center_of_mass().in_cgs()
- halo_properties['particle_position_x'][0][i] = com[0]
- halo_properties['particle_position_y'][0][i] = com[1]
- halo_properties['particle_position_z'][0][i] = com[2]
-
- # Define a bounding box based on original data pf
- bbox = np.array([data_pf.domain_left_edge.in_cgs(),
- data_pf.domain_right_edge.in_cgs()]).T
-
- # Create a pf with the halos as particles
- particle_pf = load_particles(halo_properties,
- bbox=bbox, length_unit = 1, mass_unit=1)
-
- # Create the field info dictionary so we can reference those fields
- particle_pf.create_field_info()
-
- return particle_pf
-
-
def save_catalog(self):
"Write out hdf5 file with all halo quantities."
@@ -513,4 +437,3 @@
self.add_quantity("particle_position_z", field_type=field_type)
self.add_quantity("virial_radius", field_type=field_type)
-
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 yt/analysis_modules/halo_analysis/halo_filters.py
--- a/yt/analysis_modules/halo_analysis/halo_filters.py
+++ b/yt/analysis_modules/halo_analysis/halo_filters.py
@@ -13,6 +13,10 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
+import numpy as np
+
+from yt.utilities.spatial import KDTree
+
from .halo_callbacks import HaloCallback
from .operator_registry import filter_registry
@@ -58,3 +62,44 @@
return eval("%s %s %s" % (h_value, operator, value))
add_filter("quantity_value", quantity_value)
+
+def _not_subhalo(halo, field_type="halos"):
+ """
+ Return True only if this halo is not a subhalo.
+
+ This is useful for halo finders, such as Rockstar, that output
+ parent halos and subhalos together.
+ """
+
+ if not hasattr(halo.halo_catalog, "parent_dict"):
+ halo.halo_catalog.parent_dict = \
+ create_parent_dict(halo.halo_catalog.data_source, ptype=field_type)
+ return halo.halo_catalog.parent_dict[int(halo.quantities["particle_identifier"])] == -1
+add_filter("not_subhalo", _not_subhalo)
+
+def create_parent_dict(data_source, ptype="halos"):
+ """
+ Create a dictionary of halo parents to allow for filtering of subhalos.
+
+ For any pair of halos separated by less than the radius of at least
+ one of them, the halo with the larger radius is taken to be the parent.
+ Top-level halos (those with no parents of their own) are assigned a parent index of -1.
+ """
+ pos = np.rollaxis(
+ np.array([data_source[ptype, "particle_position_x"].in_units("Mpc"),
+ data_source[ptype, "particle_position_y"].in_units("Mpc"),
+ data_source[ptype, "particle_position_z"].in_units("Mpc")]), 1)
+ rad = data_source[ptype, "virial_radius"].in_units("Mpc").to_ndarray()
+ ids = data_source[ptype, "particle_identifier"].to_ndarray().astype("int")
+ parents = -1 * np.ones_like(ids, dtype="int")
+ my_tree = KDTree(pos)
+
+ for i in xrange(ids.size):
+ neighbors = np.array(
+ my_tree.query_ball_point(pos[i], rad[i], p=2))
+ if neighbors.size > 1:
+ parents[neighbors] = ids[neighbors[np.argmax(rad[neighbors])]]
+
+ parents[ids == parents] = -1
+ parent_dict = dict(zip(ids, parents))
+ return parent_dict
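To make the parent rule concrete, here is a toy check of the same logic
using scipy's KDTree as a stand-in for yt.utilities.spatial.KDTree
(positions, radii, and ids are made up): halo 1 sits inside halo 0's
radius, so it is marked as a subhalo of halo 0, while halos 0 and 2
remain top-level with parent -1.

    import numpy as np
    from scipy.spatial import KDTree

    pos = np.array([[0.0, 0.0, 0.0], [0.1, 0.0, 0.0], [5.0, 0.0, 0.0]])
    rad = np.array([1.0, 0.2, 0.5])   # halo radii, same units as pos
    ids = np.array([0, 1, 2])
    parents = -1 * np.ones_like(ids)
    tree = KDTree(pos)
    for i in range(ids.size):
        # All halos within rad[i] of halo i, including halo i itself
        neighbors = np.array(tree.query_ball_point(pos[i], rad[i], p=2))
        if neighbors.size > 1:
            # The largest halo in the overlapping group is the parent
            parents[neighbors] = ids[neighbors[np.argmax(rad[neighbors])]]
    parents[ids == parents] = -1      # self-parents are top-level halos
    print(dict(zip(ids, parents)))    # {0: -1, 1: 0, 2: -1}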
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 yt/analysis_modules/halo_analysis/halo_finding_methods.py
--- /dev/null
+++ b/yt/analysis_modules/halo_analysis/halo_finding_methods.py
@@ -0,0 +1,141 @@
+"""
+Halo Finding methods
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.analysis_modules.halo_finding.halo_objects import \
+ FOFHaloFinder, HOPHaloFinder
+from yt.frontends.halo_catalogs.halo_catalog.data_structures import \
+ HaloCatalogDataset
+from yt.frontends.stream.data_structures import \
+ load_particles
+
+from .operator_registry import \
+ finding_method_registry
+
+
+def add_finding_method(name, function):
+ finding_method_registry[name] = HaloFindingMethod(function)
+
+class HaloFindingMethod(object):
+ r"""
+ A halo finding method is a callback that performs halo finding on a
+ dataset and returns a new dataset that is the loaded halo finder output.
+ """
+ def __init__(self, function, args=None, kwargs=None):
+ self.function = function
+ self.args = args
+ if self.args is None: self.args = []
+ self.kwargs = kwargs
+ if self.kwargs is None: self.kwargs = {}
+
+ def __call__(self, ds):
+ return self.function(ds, *self.args, **self.kwargs)
+
+def _hop_method(pf):
+ r"""
+ Run the HOP halo finding method.
+ """
+
+ halo_list = HOPHaloFinder(pf)
+ halos_pf = _parse_old_halo_list(pf, halo_list)
+ return halos_pf
+add_finding_method("hop", _hop_method)
+
+def _fof_method(pf):
+ r"""
+ Run the FoF halo finding method.
+ """
+
+ halo_list = FOFHaloFinder(pf)
+ halos_pf = _parse_old_halo_list(pf, halo_list)
+ return halos_pf
+add_finding_method("fof", _fof_method)
+
+def _rockstar_method(pf):
+ r"""
+ Run the Rockstar halo finding method.
+ """
+
+ from yt.frontends.halo_catalogs.rockstar.data_structures import \
+ RockstarDataset
+ from yt.analysis_modules.halo_finding.rockstar.api import \
+ RockstarHaloFinder
+
+ rh = RockstarHaloFinder(pf)
+ rh.run()
+
+
+ halos_pf = RockstarDataset("rockstar_halos/halos_0.0.bin")
+ try:
+ halos_pf.create_field_info()
+ except ValueError:
+ return None
+
+ return halos_pf
+add_finding_method("rockstar", _rockstar_method)
+
+def _parse_old_halo_list(data_pf, halo_list):
+ r"""
+ Convert the halo list into a loaded dataset.
+ """
+
+ num_halos = len(halo_list)
+
+ if num_halos == 0: return None
+
+ # Set up fields that we want to pull from identified halos and their units
+ new_fields = ['particle_identifier', 'particle_mass', 'particle_position_x',
+ 'particle_position_y','particle_position_z',
+ 'virial_radius']
+ new_units = [ '', 'g', 'cm', 'cm','cm','cm']
+
+ # Set up a dictionary based on those fields
+ # with empty arrays where we will fill in their values
+ halo_properties = { f : (np.zeros(num_halos),unit) \
+ for f, unit in zip(new_fields,new_units)}
+
+ # Iterate through the halos pulling out their positions and virial quantities
+ # and filling in the properties dictionary
+ for i,halo in enumerate(halo_list):
+ halo_properties['particle_identifier'][0][i] = i
+ halo_properties['particle_mass'][0][i] = halo.virial_mass().in_cgs()
+ halo_properties['virial_radius'][0][i] = halo.virial_radius().in_cgs()
+
+ com = halo.center_of_mass().in_cgs()
+ halo_properties['particle_position_x'][0][i] = com[0]
+ halo_properties['particle_position_y'][0][i] = com[1]
+ halo_properties['particle_position_z'][0][i] = com[2]
+
+ # Define a bounding box based on original data pf
+ bbox = np.array([data_pf.domain_left_edge.in_cgs(),
+ data_pf.domain_right_edge.in_cgs()]).T
+
+ # Create a pf with the halos as particles
+ particle_pf = load_particles(halo_properties,
+ bbox=bbox, length_unit = 1, mass_unit=1)
+
+ # Create the field info dictionary so we can reference those fields
+ particle_pf.create_field_info()
+
+ for attr in ["current_redshift", "current_time",
+ "domain_dimensions",
+ "cosmological_simulation", "omega_lambda",
+ "omega_matter", "hubble_constant"]:
+ attr_val = getattr(data_pf, attr)
+ setattr(particle_pf, attr, attr_val)
+ particle_pf.current_time = particle_pf.current_time.in_cgs()
+
+ return particle_pf
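Because the finders are looked up in a registry, additional methods can
be registered alongside "fof", "hop", and "rockstar". A hypothetical
sketch reusing the helpers from this file (the "my_fof" name and the
link keyword value are illustrative):

    from yt.analysis_modules.halo_finding.halo_objects import FOFHaloFinder
    from yt.analysis_modules.halo_analysis.halo_finding_methods import \
        _parse_old_halo_list, add_finding_method
    from yt.analysis_modules.halo_analysis.operator_registry import \
        finding_method_registry

    def _my_fof_method(pf):
        # FoF with a non-default linking length (keyword assumed here)
        halo_list = FOFHaloFinder(pf, link=0.3)
        return _parse_old_halo_list(pf, halo_list)
    add_finding_method("my_fof", _my_fof_method)

    # The registry then dispatches by name:
    #   halos_pf = finding_method_registry["my_fof"](data_pf)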
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 yt/analysis_modules/halo_analysis/operator_registry.py
--- a/yt/analysis_modules/halo_analysis/operator_registry.py
+++ b/yt/analysis_modules/halo_analysis/operator_registry.py
@@ -27,5 +27,5 @@
callback_registry = OperatorRegistry()
filter_registry = OperatorRegistry()
-hf_registry = OperatorRegistry()
+finding_method_registry = OperatorRegistry()
quantity_registry = OperatorRegistry()
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -110,7 +110,9 @@
if self._name == "RockstarHalo":
ds = self.pf.sphere(self.CoM, self._radjust * self.max_radius)
elif self._name == "LoadedHalo":
- ds = self.pf.sphere(self.CoM, self._radjust * self.max_radius)
+ ds = self.pf.sphere(self.CoM, np.maximum(self._radjust * \
+ self.pf.quan(self.max_radius, 'code_length'), \
+ self.pf.index.get_smallest_dx()))
sp_pid = ds['particle_index']
self._ds_sort = sp_pid.argsort()
sp_pid = sp_pid[self._ds_sort]
@@ -129,7 +131,7 @@
"""
if self.CoM is not None:
return self.CoM
- pm = self["ParticleMassMsun"]
+ pm = self["particle_mass"].in_units('Msun')
c = {}
# We shift into a box where the origin is the left edge
c[0] = self["particle_position_x"] - self.pf.domain_left_edge[0]
@@ -199,7 +201,7 @@
"""
if self.group_total_mass is not None:
return self.group_total_mass
- return self["ParticleMassMsun"].sum()
+ return self["particle_mass"].in_units('Msun').sum()
def bulk_velocity(self):
r"""Returns the mass-weighted average velocity in cm/s.
@@ -213,11 +215,11 @@
"""
if self.bulk_vel is not None:
return self.bulk_vel
- pm = self["ParticleMassMsun"]
+ pm = self["particle_mass"].in_units('Msun')
vx = (self["particle_velocity_x"] * pm).sum()
vy = (self["particle_velocity_y"] * pm).sum()
vz = (self["particle_velocity_z"] * pm).sum()
- return np.array([vx, vy, vz]) / pm.sum()
+ return self.pf.arr([vx, vy, vz], vx.units) / pm.sum()
def rms_velocity(self):
r"""Returns the mass-weighted RMS velocity for the halo
@@ -234,7 +236,7 @@
if self.rms_vel is not None:
return self.rms_vel
bv = self.bulk_velocity()
- pm = self["ParticleMassMsun"]
+ pm = self["particle_mass"].in_units('Msun')
sm = pm.sum()
vx = (self["particle_velocity_x"] - bv[0]) * pm / sm
vy = (self["particle_velocity_y"] - bv[1]) * pm / sm
@@ -331,9 +333,11 @@
handle.create_group("/%s" % gn)
for field in ["particle_position_%s" % ax for ax in 'xyz'] \
+ ["particle_velocity_%s" % ax for ax in 'xyz'] \
- + ["particle_index"] + ["ParticleMassMsun"]:
+ + ["particle_index"]:
handle.create_dataset("/%s/%s" % (gn, field), data=self[field])
- if 'creation_time' in self.data.pf.field_list:
+ handle.create_dataset("/%s/particle_mass" % gn,
+ data=self["particle_mass"].in_units('Msun'))
+ if ('io','creation_time') in self.data.pf.field_list:
handle.create_dataset("/%s/creation_time" % gn,
data=self['creation_time'])
n = handle["/%s" % gn]
@@ -464,7 +468,7 @@
if self["particle_position_x"].size > 1:
for index in np.unique(inds):
self.mass_bins[index] += \
- np.sum(self["ParticleMassMsun"][inds == index])
+ np.sum(self["particle_mass"][inds == index]).in_units('Msun')
# Now forward sum the masses in the bins.
for i in xrange(self.bin_count):
self.mass_bins[i + 1] += self.mass_bins[i]
@@ -750,7 +754,7 @@
inds = np.digitize(dist, self.radial_bins) - 1
for index in np.unique(inds):
self.mass_bins[index] += \
- np.sum(self["ParticleMassMsun"][inds == index])
+ np.sum(self["particle_mass"][inds == index]).in_units('Msun')
# Now forward sum the masses in the bins.
for i in xrange(self.bin_count):
self.mass_bins[i + 1] += self.mass_bins[i]
@@ -848,6 +852,7 @@
self._saved_fields = {}
self._ds_sort = None
self._particle_mask = None
+ self._pid_sort = None
def __getitem__(self, key):
@@ -865,14 +870,28 @@
self.size, key)
if field_data is not None:
if key == 'particle_index':
- field_data = field_data[field_data.argsort()]
+ # This is an index for turning data sorted by particle index
+ # into the same order as the fields on disk.
+ self._pid_sort = field_data.argsort().argsort()
+ # Convert to a YTArray using the data from disk
+ if key == 'particle_mass':
+ field_data = self.pf.arr(field_data, 'Msun')
+ else:
+ field_data = self.pf.arr(field_data,
+ self.pf._get_field_info('unknown',key).units)
self._saved_fields[key] = field_data
return self._saved_fields[key]
# We won't store this field below in saved_fields because
# that would mean keeping two copies of it, one in the yt
# machinery and one here.
- ds = self.pf.sphere(self.CoM, 1.05 * self.max_radius)
- return np.take(ds[key][self._ds_sort], self.particle_mask)
+ ds = self.pf.sphere(self.CoM, np.maximum(self._radjust * \
+ self.pf.quan(self.max_radius, 'code_length'), \
+ self.pf.index.get_smallest_dx()))
+ # If particle_mask hasn't been called once then _ds_sort won't have
+ # the proper values set yet
+ if self._particle_mask is None:
+ self.particle_mask
+ return ds[key][self._ds_sort][self.particle_mask][self._pid_sort]
def _get_particle_data(self, halo, fnames, size, field):
# Given a list of file names, a halo, its size, and the desired field,
@@ -1087,10 +1106,10 @@
gc.collect()
def _get_dm_indices(self):
- if 'creation_time' in self._data_source.index.field_list:
+ if ('io','creation_time') in self._data_source.index.field_list:
mylog.debug("Differentiating based on creation time")
return (self._data_source["creation_time"] <= 0)
- elif 'particle_type' in self._data_source.index.field_list:
+ elif ('io','particle_type') in self._data_source.index.field_list:
mylog.debug("Differentiating based on particle type")
return (self._data_source["particle_type"] == 1)
else:
@@ -1356,7 +1375,7 @@
_name = "HOP"
_halo_class = HOPHalo
_fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
- ["ParticleMassMsun"]
+ ["particle_mass"]
def __init__(self, data_source, threshold=160.0, dm_only=True):
self.threshold = threshold
@@ -1368,7 +1387,7 @@
RunHOP(self.particle_fields["particle_position_x"] / self.period[0],
self.particle_fields["particle_position_y"] / self.period[1],
self.particle_fields["particle_position_z"] / self.period[2],
- self.particle_fields["ParticleMassMsun"],
+ self.particle_fields["particle_mass"].in_units('Msun'),
self.threshold)
self.particle_fields["densities"] = self.densities
self.particle_fields["tags"] = self.tags
@@ -1555,7 +1574,7 @@
_name = "parallelHOP"
_halo_class = parallelHOPHalo
_fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
- ["ParticleMassMsun", "particle_index"]
+ ["particle_mass", "particle_index"]
def __init__(self, data_source, padding, num_neighbors, bounds, total_mass,
period, threshold=160.0, dm_only=True, rearrange=True, premerge=True,
@@ -1589,8 +1608,8 @@
self.comm.mpi_exit_test(exit)
# Try to do this in a memory conservative way.
- np.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
- self.particle_fields['ParticleMassMsun'])
+ np.divide(self.particle_fields['particle_mass'].in_units('Msun'), self.total_mass,
+ self.particle_fields['particle_mass'])
np.divide(self.particle_fields["particle_position_x"],
self.old_period[0], self.particle_fields["particle_position_x"])
np.divide(self.particle_fields["particle_position_y"],
@@ -2141,7 +2160,7 @@
elif fancy_padding and self._distributed:
LE_padding = np.empty(3, dtype='float64')
RE_padding = np.empty(3, dtype='float64')
- avg_spacing = (float(vol) / data.size) ** (1. / 3.)
+ avg_spacing = (vol / data.size) ** (1. / 3.)
base_padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
avg_spacing
for dim in xrange(3):
@@ -2190,7 +2209,7 @@
# Now we get the full box mass after we have the final composition of
# subvolumes.
if total_mass is None:
- total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(),
+ total_mass = self.comm.mpi_allreduce((self._data_source["particle_mass"].in_units('Msun').astype('float64')).sum(),
op='sum')
if not self._distributed:
self.padding = (np.zeros(3, dtype='float64'),
@@ -2386,9 +2405,9 @@
if dm_only:
select = self._get_dm_indices()
total_mass = \
- self.comm.mpi_allreduce((self._data_source['all', "ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
+ self.comm.mpi_allreduce((self._data_source['all', "particle_mass"][select].in_units('Msun')).sum(dtype='float64'), op='sum')
else:
- total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0], op='sum')
+ total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("particle_mass").in_units('Msun'), op='sum')
# MJT: Note that instead of this, if we are assuming that the particles
# are all on different processors, we should instead construct an
# object representing the entire domain and sum it "lazily" with
@@ -2409,10 +2428,10 @@
sub_mass = total_mass
elif dm_only:
select = self._get_dm_indices()
- sub_mass = self._data_source["ParticleMassMsun"][select].sum(dtype='float64')
+ sub_mass = self._data_source["particle_mass"][select].in_units('Msun').sum(dtype='float64')
else:
sub_mass = \
- self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0]
+ self._data_source.quantities["TotalQuantity"]("particle_mass").in_units('Msun')
HOPHaloList.__init__(self, self._data_source,
threshold * total_mass / sub_mass, dm_only)
self._parse_halolist(total_mass / sub_mass)
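The _pid_sort bookkeeping above exploits the fact that applying argsort
twice yields the inverse permutation: if order sorts an array, then
order.argsort() maps the sorted order back to the original order. A
small standalone demonstration (values made up):

    import numpy as np

    pid_on_disk = np.array([42, 7, 19, 3])  # particle_index in on-disk order
    order = pid_on_disk.argsort()           # on-disk order -> sorted order
    pid_sort = order.argsort()              # sorted order -> on-disk order
    sorted_data = pid_on_disk[order]        # array([ 3,  7, 19, 42])
    assert (sorted_data[pid_sort] == pid_on_disk).all()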
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -53,7 +53,7 @@
self.zpos = particle_fields.pop("particle_position_z")
self.real_size = len(self.xpos)
self.index = particle_fields.pop("particle_index")
- self.mass = particle_fields.pop("ParticleMassMsun")
+ self.mass = particle_fields.pop("particle_mass")
self.padded_particles = []
self.nMerge = 4
self.tree = tree
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -220,12 +220,11 @@
cdef np.int64_t last_fof_tag = 1
cdef np.int64_t k = 0
for i in range(num_particles):
- if fof_tags[i] == 0:
+ if fof_tags[i] < 0:
continue
if fof_tags[i] != last_fof_tag:
last_fof_tag = fof_tags[i]
if k > 16:
- print "Finding subs", k, i
fof_obj.num_p = k
find_subs(&fof_obj)
k = 0
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 yt/analysis_modules/halo_finding/setup.py
--- a/yt/analysis_modules/halo_finding/setup.py
+++ b/yt/analysis_modules/halo_finding/setup.py
@@ -1,9 +1,7 @@
#!/usr/bin/env python
-import setuptools
-import os
-import sys
import os.path
+
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('halo_finding', parent_package, top_path)
@@ -12,6 +10,5 @@
config.add_subpackage("parallel_hop")
if os.path.exists("rockstar.cfg"):
config.add_subpackage("rockstar")
- config.make_config_py() # installs __config__.py
- #config.make_svn_version_py()
+ config.make_config_py() # installs __config__.py
return config
diff -r 51cfb2317ed3f5777eda334bc1cefecee546caa6 -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 yt/analysis_modules/hierarchy_subset/api.py
--- a/yt/analysis_modules/hierarchy_subset/api.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-API for hierarchy_subset
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .hierarchy_subset import \
- ConstructedRootGrid, \
- AMRExtractedGridProxy, \
- ExtractedHierarchy, \
- ExtractedParameterFile
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/yt_analysis/yt/commits/869468b46038/
Changeset: 869468b46038
Branch: yt-3.0
User: ngoldbaum
Date: 2014-05-02 21:22:24
Summary: Adding a config option to control the logging stream handler.
Affected #: 2 files
diff -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 -r 869468b460381a8820464c5e63c7598303dc0ad1 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -24,6 +24,7 @@
logfile = 'False',
coloredlogs = 'False',
suppressstreamlogging = 'False',
+ StdoutStreamLogging = 'False',
loglevel = '20',
inline = 'False',
numthreads = '-1',
diff -r 2a18506ca026190e9d3664f5c0e5d4ca784a7706 -r 869468b460381a8820464c5e63c7598303dc0ad1 yt/utilities/logger.py
--- a/yt/utilities/logger.py
+++ b/yt/utilities/logger.py
@@ -46,10 +46,16 @@
level = min(max(ytcfg.getint("yt", "loglevel"), 0), 50)
ufstring = "%(name)-3s: [%(levelname)-9s] %(asctime)s %(message)s"
cfstring = "%(name)-3s: [%(levelname)-18s] %(asctime)s %(message)s"
+
+if ytcfg.getboolean("yt", "StdoutStreamLogging"):
+ stream = sys.stdout
+else:
+ stream = sys.stderr
+
logging.basicConfig(
format=ufstring,
level=level,
- stream=sys.stdout
+ stream=stream,
)
rootLogger = logging.getLogger()
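With this change yt logs to stderr by default. To send log messages to
stdout instead, set the new option in the [yt] section of the yt
configuration file (typically ~/.yt/config in this version of yt; the
path is assumed here):

    [yt]
    StdoutStreamLogging = True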
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled and are the recipient of
this email.