[yt-svn] commit/yt-3.0: 49 new changesets
Bitbucket
commits-noreply at bitbucket.org
Mon Aug 20 12:10:23 PDT 2012
49 new commits in yt-3.0:
https://bitbucket.org/yt_analysis/yt-3.0/changeset/96e6428dc175/
changeset: 96e6428dc175
branch: yt-3.0
user: MatthewTurk
date: 2012-08-20 21:00:25
summary: Initial cylindrical coordinate system import for FLASH data.
affected #: 3 files
diff -r 8eb1c2c3fe440fd3f4e0f17e868d74462f7bbeec -r 96e6428dc1755f5346183b7f65b32857333d7ff0 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -43,7 +43,8 @@
from yt.utilities.io_handler import \
io_registry
-from .fields import FLASHFieldInfo, add_flash_field, KnownFLASHFields
+from .fields import FLASHFieldInfo, add_flash_field, KnownFLASHFields, \
+ CylindricalFLASHFieldInfo
from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc, \
ValidateDataField
@@ -215,7 +216,7 @@
class FLASHStaticOutput(StaticOutput):
_hierarchy_class = FLASHHierarchy
- _fieldinfo_fallback = FLASHFieldInfo
+ #_fieldinfo_fallback = FLASHFieldInfo # Now a property
_fieldinfo_known = KnownFLASHFields
_handle = None
@@ -406,10 +407,19 @@
if dimensionality < 3:
mylog.warning("Guessing dimensionality as %s", dimensionality)
+ self.dimensionality = dimensionality
+
+ self.geometry = self.parameters["geometry"]
+ if self.geometry == "cartesian":
+ self._setup_cartesian_coordinates()
+ elif self.geometry == "cylindrical":
+ self._setup_cylindrical_coordinates()
+ else:
+ raise YTGeometryNotSupported(self.geometry)
+
nblockx = self.parameters["nblockx"]
nblocky = self.parameters["nblocky"]
nblockz = self.parameters["nblockz"]
- self.dimensionality = dimensionality
self.domain_dimensions = \
na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
try:
@@ -431,6 +441,24 @@
self.current_redshift = self.omega_lambda = self.omega_matter = \
self.hubble_constant = self.cosmological_simulation = 0.0
+ def _setup_cartesian_coordinates(self):
+ pass
+
+ def _setup_cylindrical_coordinates(self):
+ if self.dimensionality == 2:
+ self.domain_left_edge[2] = 0.0
+ self.domain_right_edge[2] = 2.0 * na.pi
+
+ @property
+ def _fieldinfo_fallback(self):
+ geom = self.parameters.get("geometry", "cartesian")
+ if geom == "cartesian":
+ return FLASHFieldInfo
+ elif geom == "cylindrical":
+ return CylindricalFLASHFieldInfo
+ else:
+ raise RuntimeError
+
def __del__(self):
self._handle.close()
diff -r 8eb1c2c3fe440fd3f4e0f17e868d74462f7bbeec -r 96e6428dc1755f5346183b7f65b32857333d7ff0 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -36,12 +36,16 @@
import yt.data_objects.universal_fields
from yt.utilities.physical_constants import \
kboltz
+import numpy as na
KnownFLASHFields = FieldInfoContainer()
add_flash_field = KnownFLASHFields.add_field
FLASHFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
add_field = FLASHFieldInfo.add_field
+CylindricalFLASHFieldInfo = FieldInfoContainer.create_with_fallback(FLASHFieldInfo)
+add_cyl_field = CylindricalFLASHFieldInfo.add_field
+
# Common fields in FLASH: (Thanks to John ZuHone for this list)
#
# dens gas mass density (g/cc) --
@@ -254,3 +258,51 @@
add_field("GasEnergy", function=_GasEnergy,
units=r"\rm{ergs}/\rm{g}")
+
+def _unknown_coord(field, data):
+ raise RuntimeError
+add_cyl_field("dx", function=_unknown_coord)
+add_cyl_field("dy", function=_unknown_coord)
+
+def _dr(field, data):
+ return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
+add_cyl_field('dr', function=_dr, display_field=False,
+ validators=[ValidateSpatial(0)])
+
+def _dz(field, data):
+ return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
+add_cyl_field('dz', function=_dz,
+ display_field=False, validators=[ValidateSpatial(0)])
+
+def _dtheta(field, data):
+ return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
+add_cyl_field('dtheta', function=_dtheta,
+ display_field=False, validators=[ValidateSpatial(0)])
+
+def _coordR(field, data):
+ dim = data.ActiveDimensions[0]
+ return (na.ones(data.ActiveDimensions, dtype='float64')
+ * na.arange(data.ActiveDimensions[0])[:,None,None]
+ +0.5) * data['dr'] + data.LeftEdge[0]
+add_cyl_field('r', function=_coordR, display_field=False,
+ validators=[ValidateSpatial(0)])
+
+def _coordZ(field, data):
+ dim = data.ActiveDimensions[1]
+ return (na.ones(data.ActiveDimensions, dtype='float64')
+ * na.arange(data.ActiveDimensions[1])[None,:,None]
+ +0.5) * data['dz'] + data.LeftEdge[1]
+add_cyl_field('z', function=_coordZ, display_field=False,
+ validators=[ValidateSpatial(0)])
+
+def _coordTheta(field, data):
+ dim = data.ActiveDimensions[2]
+ return (na.ones(data.ActiveDimensions, dtype='float64')
+            * na.arange(data.ActiveDimensions[2])[None,None,:]
+ +0.5) * data['dtheta'] + data.LeftEdge[2]
+add_cyl_field('theta', function=_coordTheta, display_field=False,
+ validators=[ValidateSpatial(0)])
+
+def _CylindricalVolume(field, data):
+ return data["dtheta"] * data["r"] * data["dr"] * data["dz"]
+add_cyl_field("CellVolume", function=_CylindricalVolume)
diff -r 8eb1c2c3fe440fd3f4e0f17e868d74462f7bbeec -r 96e6428dc1755f5346183b7f65b32857333d7ff0 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -125,3 +125,10 @@
return "You have not declared yourself to be inside the IPython" + \
"Notebook. Do so with this command:\n\n" + \
"ytcfg['yt','ipython_notebook'] = 'True'"
+
+class YTGeometryNotSupported(YTException):
+ def __init__(self, geom):
+ self.geom = geom
+
+ def __str__(self):
+ return "We don't currently support %s geometry" % self.geom
https://bitbucket.org/yt_analysis/yt-3.0/changeset/494dd6627f57/
changeset: 494dd6627f57
branch: yt
user: MatthewTurk
date: 2012-08-02 12:04:59
summary: Off-axis slices should be *centered* at the center, but the bounds will be with
respect to 0. So bounds should be calculated that way, rather than absolutely.
affected #: 1 file
diff -r d28068beb5aceb01702502ecf7e86ecd0afbf0eb -r 494dd6627f57c3b9896c5b32d37adc7930610f6f yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -179,8 +179,7 @@
center = na.dot(mat,center)
width = width/pf.domain_width.min()
- bounds = [center[0]-width[0]/2,center[0]+width[0]/2,
- center[1]-width[1]/2,center[1]+width[1]/2]
+ bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
return (bounds,center)
https://bitbucket.org/yt_analysis/yt-3.0/changeset/ef282894f4d9/
changeset: ef282894f4d9
branch: yt
user: MatthewTurk
date: 2012-08-02 12:21:52
summary: This fixes a bug that I introduced where slices and cutting planes had no units
displayed.
affected #: 1 file
diff -r 494dd6627f57c3b9896c5b32d37adc7930610f6f -r ef282894f4d981379c19f3728af891d5626cd520 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -550,7 +550,7 @@
pf = self.pf
if ds._type_name in ("slice", "cutting"):
units = pf.field_info[field].get_units()
- if ds._type_name == "proj" and (ds.weight_field is not None or
+ elif ds._type_name == "proj" and (ds.weight_field is not None or
ds.proj_style == "mip"):
units = pf.field_info[field].get_units()
elif ds._type_name == "proj":
https://bitbucket.org/yt_analysis/yt-3.0/changeset/076cec2c57d2/
changeset: 076cec2c57d2
branch: stable
user: MatthewTurk
date: 2012-08-02 20:22:45
summary: Merging from development branch for the 2.4 release.
affected #: 466 files
Diff too large to display.
https://bitbucket.org/yt_analysis/yt-3.0/changeset/4f3abfe167c0/
changeset: 4f3abfe167c0
branch: stable
user: MatthewTurk
date: 2012-08-02 20:22:49
summary: Added tag yt-2.4 for changeset 076cec2c57d2
affected #: 1 file
diff -r 076cec2c57d2e4b508babbfd661f5daa1e34ec80 -r 4f3abfe167c0eb7cae09947944877a1e4f86eced .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5157,3 +5157,4 @@
a2b3521b1590c25029ca0bc602ad6cb7ae7b8ba2 yt-2.1
41bd8aacfbc81fa66d7a3f2cd2880f10c3e237a4 yt-2.2
3836676ee6307f9caf5ccdb0f0dd373676a68535 yt-2.3
+076cec2c57d2e4b508babbfd661f5daa1e34ec80 yt-2.4
https://bitbucket.org/yt_analysis/yt-3.0/changeset/6be71495cd03/
changeset: 6be71495cd03
branch: yt
user: MatthewTurk
date: 2012-08-02 20:23:26
summary: Updating development branch to be 2.5dev
affected #: 1 file
diff -r ef282894f4d981379c19f3728af891d5626cd520 -r 6be71495cd039b5b433ff3df7095c77274b0e837 setup.py
--- a/setup.py
+++ b/setup.py
@@ -106,7 +106,7 @@
import setuptools
-VERSION = "2.4dev"
+VERSION = "2.5dev"
if os.path.exists('MANIFEST'): os.remove('MANIFEST')
https://bitbucket.org/yt_analysis/yt-3.0/changeset/946c6690c403/
changeset: 946c6690c403
branch: yt
user: MatthewTurk
date: 2012-08-02 23:28:57
summary: Fixing a NumPy dtype issue in the uploading of metadata for FLASH simulations.
affected #: 1 file
diff -r 6be71495cd039b5b433ff3df7095c77274b0e837 -r 946c6690c4033e304ffa2637573c505d1756c2ff yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -99,6 +99,8 @@
for i in metadata:
if isinstance(metadata[i], na.ndarray):
metadata[i] = metadata[i].tolist()
+ elif hasattr(metadata[i], 'dtype'):
+ metadata[i] = na.asscalar(metadata[i])
metadata['obj_type'] = self.type
if len(chunks) == 0:
chunk_info = {'chunks': []}
https://bitbucket.org/yt_analysis/yt-3.0/changeset/e55966f1addd/
changeset: e55966f1addd
branch: stable
user: MatthewTurk
date: 2012-08-02 23:28:57
summary: Fixing a NumPy dtype issue in the uploading of metadata for FLASH simulations.
affected #: 1 file
diff -r 4f3abfe167c0eb7cae09947944877a1e4f86eced -r e55966f1addde8b88e75fbf82d4170e6b4426416 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -99,6 +99,8 @@
for i in metadata:
if isinstance(metadata[i], na.ndarray):
metadata[i] = metadata[i].tolist()
+ elif hasattr(metadata[i], 'dtype'):
+ metadata[i] = na.asscalar(metadata[i])
metadata['obj_type'] = self.type
if len(chunks) == 0:
chunk_info = {'chunks': []}
https://bitbucket.org/yt_analysis/yt-3.0/changeset/5d442fd44c20/
changeset: 5d442fd44c20
branch: yt
user: jmoloney
date: 2012-08-02 22:07:37
summary: Fixed 'next' and 'previous' pointers in cosmology splices to point to the correct outputs.
affected #: 1 file
diff -r 6be71495cd039b5b433ff3df7095c77274b0e837 -r 5d442fd44c20f43cc484cec6ea76661502ad96a3 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -184,7 +184,23 @@
mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." %
(len(cosmology_splice), far_redshift, near_redshift))
-
+
+ # change the 'next' and 'previous' pointers to point to the correct outputs for the created
+ # splice
+ for i, output in enumerate(cosmology_splice):
+ if len(cosmology_splice) == 1:
+ output['previous'] = None
+ output['next'] = None
+ elif i == 0:
+ output['previous'] = None
+ output['next'] = cosmology_splice[i + 1]
+ elif i == len(cosmology_splice) - 1:
+ output['previous'] = cosmology_splice[i - 1]
+ output['next'] = None
+ else:
+ output['previous'] = cosmology_splice[i - 1]
+ output['next'] = cosmology_splice[i + 1]
+
self.splice_outputs.sort(key=lambda obj: obj['time'])
return cosmology_splice
https://bitbucket.org/yt_analysis/yt-3.0/changeset/b1515007cc61/
changeset: b1515007cc61
branch: yt
user: brittonsmith
date: 2012-08-02 23:31:50
summary: Merged in jmoloney/yt (pull request #234)
affected #: 1 file
diff -r 946c6690c4033e304ffa2637573c505d1756c2ff -r b1515007cc61e41bda16e01cdc0579524eb14678 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -184,7 +184,23 @@
mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." %
(len(cosmology_splice), far_redshift, near_redshift))
-
+
+ # change the 'next' and 'previous' pointers to point to the correct outputs for the created
+ # splice
+ for i, output in enumerate(cosmology_splice):
+ if len(cosmology_splice) == 1:
+ output['previous'] = None
+ output['next'] = None
+ elif i == 0:
+ output['previous'] = None
+ output['next'] = cosmology_splice[i + 1]
+ elif i == len(cosmology_splice) - 1:
+ output['previous'] = cosmology_splice[i - 1]
+ output['next'] = None
+ else:
+ output['previous'] = cosmology_splice[i - 1]
+ output['next'] = cosmology_splice[i + 1]
+
self.splice_outputs.sort(key=lambda obj: obj['time'])
return cosmology_splice
https://bitbucket.org/yt_analysis/yt-3.0/changeset/4fc8fc9b8c10/
changeset: 4fc8fc9b8c10
branch: stable
user: jmoloney
date: 2012-08-02 22:07:37
summary: Fixed 'next' and 'previous' pointers in cosmology splices to point to the correct outputs.
affected #: 1 file
diff -r e55966f1addde8b88e75fbf82d4170e6b4426416 -r 4fc8fc9b8c10170b232e0aba44989477be82e9bb yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -184,7 +184,23 @@
mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." %
(len(cosmology_splice), far_redshift, near_redshift))
-
+
+ # change the 'next' and 'previous' pointers to point to the correct outputs for the created
+ # splice
+ for i, output in enumerate(cosmology_splice):
+ if len(cosmology_splice) == 1:
+ output['previous'] = None
+ output['next'] = None
+ elif i == 0:
+ output['previous'] = None
+ output['next'] = cosmology_splice[i + 1]
+ elif i == len(cosmology_splice) - 1:
+ output['previous'] = cosmology_splice[i - 1]
+ output['next'] = None
+ else:
+ output['previous'] = cosmology_splice[i - 1]
+ output['next'] = cosmology_splice[i + 1]
+
self.splice_outputs.sort(key=lambda obj: obj['time'])
return cosmology_splice
https://bitbucket.org/yt_analysis/yt-3.0/changeset/e185e61e4171/
changeset: e185e61e4171
branch: yt
user: ngoldbaum
date: 2012-08-04 00:14:15
summary: Fixing a bug pointed out on the mailing list by Sherwood Richers.
affected #: 1 file
diff -r 3c2ce08a863303a80b057f8f9fba38af0866b5b6 -r e185e61e4171d788ca5c2643d483acd3f5f5d2a7 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1236,7 +1236,7 @@
plt = SlicePlot(pf, ax, args.field, center=center,
width=width)
if args.grids:
- plt.draw_grids()
+ plt.annotate_grids()
if args.time:
time = pf.current_time*pf['Time']*pf['years']
plt.annotate_text((0.2,0.8), 't = %5.2e yr'%time)
https://bitbucket.org/yt_analysis/yt-3.0/changeset/b950249e356e/
changeset: b950249e356e
branch: yt
user: ngoldbaum
date: 2012-08-04 00:14:50
summary: Merging
affected #: 6 files
diff -r e185e61e4171d788ca5c2643d483acd3f5f5d2a7 -r b950249e356e67e8c101606a73cc43fdbc9bfe79 setup.py
--- a/setup.py
+++ b/setup.py
@@ -106,7 +106,7 @@
import setuptools
-VERSION = "2.4dev"
+VERSION = "2.5dev"
if os.path.exists('MANIFEST'): os.remove('MANIFEST')
diff -r e185e61e4171d788ca5c2643d483acd3f5f5d2a7 -r b950249e356e67e8c101606a73cc43fdbc9bfe79 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -184,7 +184,23 @@
mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." %
(len(cosmology_splice), far_redshift, near_redshift))
-
+
+ # change the 'next' and 'previous' pointers to point to the correct outputs for the created
+ # splice
+ for i, output in enumerate(cosmology_splice):
+ if len(cosmology_splice) == 1:
+ output['previous'] = None
+ output['next'] = None
+ elif i == 0:
+ output['previous'] = None
+ output['next'] = cosmology_splice[i + 1]
+ elif i == len(cosmology_splice) - 1:
+ output['previous'] = cosmology_splice[i - 1]
+ output['next'] = None
+ else:
+ output['previous'] = cosmology_splice[i - 1]
+ output['next'] = cosmology_splice[i + 1]
+
self.splice_outputs.sort(key=lambda obj: obj['time'])
return cosmology_splice
diff -r e185e61e4171d788ca5c2643d483acd3f5f5d2a7 -r b950249e356e67e8c101606a73cc43fdbc9bfe79 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -691,7 +691,7 @@
malloc(sizeof(FieldInterpolationTable) * 6)
self.vra.n_fits = tf_obj.n_field_tables
assert(self.vra.n_fits <= 6)
- self.vra.grey_opacity = tf_obj.grey_opacity
+ self.vra.grey_opacity = getattr(tf_obj, "grey_opacity", 0)
self.vra.n_samples = n_samples
self.my_field_tables = []
for i in range(self.vra.n_fits):
@@ -757,7 +757,7 @@
malloc(sizeof(FieldInterpolationTable) * 6)
self.vra.n_fits = tf_obj.n_field_tables
assert(self.vra.n_fits <= 6)
- self.vra.grey_opacity = tf_obj.grey_opacity
+ self.vra.grey_opacity = getattr(tf_obj, "grey_opacity", 0)
self.vra.n_samples = n_samples
self.vra.light_dir = <np.float64_t *> malloc(sizeof(np.float64_t) * 3)
self.vra.light_rgba = <np.float64_t *> malloc(sizeof(np.float64_t) * 4)
diff -r e185e61e4171d788ca5c2643d483acd3f5f5d2a7 -r b950249e356e67e8c101606a73cc43fdbc9bfe79 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -99,6 +99,8 @@
for i in metadata:
if isinstance(metadata[i], na.ndarray):
metadata[i] = metadata[i].tolist()
+ elif hasattr(metadata[i], 'dtype'):
+ metadata[i] = na.asscalar(metadata[i])
metadata['obj_type'] = self.type
if len(chunks) == 0:
chunk_info = {'chunks': []}
diff -r e185e61e4171d788ca5c2643d483acd3f5f5d2a7 -r b950249e356e67e8c101606a73cc43fdbc9bfe79 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -179,8 +179,7 @@
center = na.dot(mat,center)
width = width/pf.domain_width.min()
- bounds = [center[0]-width[0]/2,center[0]+width[0]/2,
- center[1]-width[1]/2,center[1]+width[1]/2]
+ bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
return (bounds,center)
@@ -551,7 +550,7 @@
pf = self.pf
if ds._type_name in ("slice", "cutting"):
units = pf.field_info[field].get_units()
- if ds._type_name == "proj" and (ds.weight_field is not None or
+ elif ds._type_name == "proj" and (ds.weight_field is not None or
ds.proj_style == "mip"):
units = pf.field_info[field].get_units()
elif ds._type_name == "proj":
diff -r e185e61e4171d788ca5c2643d483acd3f5f5d2a7 -r b950249e356e67e8c101606a73cc43fdbc9bfe79 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -823,14 +823,14 @@
# This assumes Density; this is a relatively safe assumption.
import matplotlib.figure
import matplotlib.backends.backend_agg
- phi, theta = na.mgrid[0.0:2*pi:800j, 0:pi:800j]
+ phi, theta = na.mgrid[0.0:2*na.pi:800j, 0:na.pi:800j]
pixi = arr_ang2pix_nest(self.nside, theta.ravel(), phi.ravel())
image *= self.radius * self.pf['cm']
img = na.log10(image[:,0,0][pixi]).reshape((800,800))
fig = matplotlib.figure.Figure((10, 5))
ax = fig.add_subplot(1,1,1,projection='hammer')
- implot = ax.imshow(img, extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
+ implot = ax.imshow(img, extent=(-na.pi,na.pi,-na.pi/2,na.pi/2), clip_on=False, aspect=0.5)
cb = fig.colorbar(implot, orientation='horizontal')
cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
if clim is not None: cb.set_clim(*clim)
@@ -1460,6 +1460,8 @@
vs2 = vs.copy()
for i in range(3):
vs[:,:,i] = (vs2 * rotation[:,i]).sum(axis=2)
+ else:
+ vs += 1e-8
positions = na.ones((nv, 1, 3), dtype='float64', order='C') * center
dx = min(g.dds.min() for g in pf.h.find_point(center)[0])
positions += inner_radius * dx * vs
@@ -1490,7 +1492,7 @@
for g in pf.h.grids:
if "temp_weightfield" in g.keys():
del g["temp_weightfield"]
- return image
+ return image[:,0,0]
def plot_allsky_healpix(image, nside, fn, label = "", rotation = None,
take_log = True, resolution=512):
@@ -1504,7 +1506,8 @@
ax = fig.add_subplot(1,1,1,projection='aitoff')
if take_log: func = na.log10
else: func = lambda a: a
- implot = ax.imshow(func(img), extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
+ implot = ax.imshow(func(img), extent=(-na.pi,na.pi,-na.pi/2,na.pi/2),
+ clip_on=False, aspect=0.5)
cb = fig.colorbar(implot, orientation='horizontal')
cb.set_label(label)
ax.xaxis.set_ticks(())
https://bitbucket.org/yt_analysis/yt-3.0/changeset/9a736000a652/
changeset: 9a736000a652
branch: stable
user: ngoldbaum
date: 2012-08-04 00:14:15
summary: Fixing a bug pointed out on the mailing list by Sherwood Richers.
affected #: 1 file
diff -r 4fc8fc9b8c10170b232e0aba44989477be82e9bb -r 9a736000a6523f75744d9a9b0a0d538fa34ab3bc yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1236,7 +1236,7 @@
plt = SlicePlot(pf, ax, args.field, center=center,
width=width)
if args.grids:
- plt.draw_grids()
+ plt.annotate_grids()
if args.time:
time = pf.current_time*pf['Time']*pf['years']
plt.annotate_text((0.2,0.8), 't = %5.2e yr'%time)
https://bitbucket.org/yt_analysis/yt-3.0/changeset/6d195b8f68da/
changeset: 6d195b8f68da
branch: yt
user: MatthewTurk
date: 2012-08-05 16:47:41
summary: Adding uuid-dev to the Ubuntu notes in install_script.sh.
affected #: 1 file
diff -r b950249e356e67e8c101606a73cc43fdbc9bfe79 -r 6d195b8f68daa0e157601a595091801293dc9e57 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -213,10 +213,11 @@
echo " * libncurses5"
echo " * libncurses5-dev"
echo " * zip"
+ echo " * uuid-dev"
echo
echo "You can accomplish this by executing:"
echo
- echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip"
+ echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev"
echo
fi
if [ ! -z "${CFLAGS}" ]
https://bitbucket.org/yt_analysis/yt-3.0/changeset/f3433665ccaf/
changeset: f3433665ccaf
branch: stable
user: MatthewTurk
date: 2012-08-05 16:47:41
summary: Adding uuid-dev to the Ubuntu notes in install_script.sh.
affected #: 1 file
diff -r 9a736000a6523f75744d9a9b0a0d538fa34ab3bc -r f3433665ccaf898fcb4f1dc2addf0248fa8a74cc doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -213,10 +213,11 @@
echo " * libncurses5"
echo " * libncurses5-dev"
echo " * zip"
+ echo " * uuid-dev"
echo
echo "You can accomplish this by executing:"
echo
- echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip"
+ echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev"
echo
fi
if [ ! -z "${CFLAGS}" ]
https://bitbucket.org/yt_analysis/yt-3.0/changeset/c9ac1da6c1c4/
changeset: c9ac1da6c1c4
branch: yt
user: xarthisius
date: 2012-07-17 05:07:38
summary: [frontends/gdf] define all available length units using cm
affected #: 1 file
diff -r b1515007cc61e41bda16e01cdc0579524eb14678 -r c9ac1da6c1c42bd8ad518d390870b3f2ea962971 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -38,7 +38,7 @@
from yt.data_objects.static_output import \
StaticOutput
from yt.utilities.definitions import \
- sec_conversion
+ mpc_conversion, sec_conversion
from .fields import GDFFieldInfo, KnownGDFFields
from yt.data_objects.field_info_container import \
@@ -172,6 +172,8 @@
self.units['1'] = 1.0
self.units['cm'] = 1.0
self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+ for unit in mpc_conversion.keys():
+ self.units[unit] = 1.0 * mpc_conversion[unit] / mpc_conversion["cm"]
for unit in sec_conversion.keys():
self.time_units[unit] = 1.0 / sec_conversion[unit]
https://bitbucket.org/yt_analysis/yt-3.0/changeset/0fae6c70a51b/
changeset: 0fae6c70a51b
branch: yt
user: MatthewTurk
date: 2012-08-06 12:17:56
summary: Merged in xarthisius/yt (pull request #235)
affected #: 1 file
diff -r 6d195b8f68daa0e157601a595091801293dc9e57 -r 0fae6c70a51b683546489137ac61184b3c11b39a yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -38,7 +38,7 @@
from yt.data_objects.static_output import \
StaticOutput
from yt.utilities.definitions import \
- sec_conversion
+ mpc_conversion, sec_conversion
from .fields import GDFFieldInfo, KnownGDFFields
from yt.data_objects.field_info_container import \
@@ -172,6 +172,8 @@
self.units['1'] = 1.0
self.units['cm'] = 1.0
self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+ for unit in mpc_conversion.keys():
+ self.units[unit] = 1.0 * mpc_conversion[unit] / mpc_conversion["cm"]
for unit in sec_conversion.keys():
self.time_units[unit] = 1.0 / sec_conversion[unit]
https://bitbucket.org/yt_analysis/yt-3.0/changeset/c98bd507540c/
changeset: c98bd507540c
branch: yt
user: MatthewTurk
date: 2012-08-06 20:51:34
summary: Adding check for Mountain Lion compilation:
MPL_SUPP_CFLAGS="-mmacosx-version-min=10.7"
MPL_SUPP_CXXFLAGS="-mmacosx-version-min=10.7"
affected #: 1 file
diff -r 0fae6c70a51b683546489137ac61184b3c11b39a -r c98bd507540cd694876ec036d5ca2a270ecb88b1 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -201,6 +201,12 @@
echo "$ export CC=gcc-4.2"
echo "$ export CXX=g++-4.2"
echo
+ OSX_VERSION=`sw_vers -productVersion`
+ if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
+ then
+ MPL_SUPP_CFLAGS="${MPL_SUPP_CFLAGS} -mmacosx-version-min=10.7"
+ MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
+ fi
fi
if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
then
https://bitbucket.org/yt_analysis/yt-3.0/changeset/15bfbdcf71d7/
changeset: 15bfbdcf71d7
branch: stable
user: MatthewTurk
date: 2012-08-06 20:51:34
summary: Adding check for Mountain Lion compilation:
MPL_SUPP_CFLAGS="-mmacosx-version-min=10.7"
MPL_SUPP_CXXFLAGS="-mmacosx-version-min=10.7"
affected #: 1 file
diff -r f3433665ccaf898fcb4f1dc2addf0248fa8a74cc -r 15bfbdcf71d775096c3649926c2ec194b1e69c79 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -201,6 +201,12 @@
echo "$ export CC=gcc-4.2"
echo "$ export CXX=g++-4.2"
echo
+ OSX_VERSION=`sw_vers -productVersion`
+ if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
+ then
+ MPL_SUPP_CFLAGS="${MPL_SUPP_CFLAGS} -mmacosx-version-min=10.7"
+ MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
+ fi
fi
if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
then
https://bitbucket.org/yt_analysis/yt-3.0/changeset/e6227ef4338b/
changeset: e6227ef4338b
branch: stable
user: MatthewTurk
date: 2012-08-07 20:19:36
summary: Disabling Maestro imports for now.
affected #: 1 file
diff -r 15bfbdcf71d775096c3649926c2ec194b1e69c79 -r e6227ef4338b6cc70de051951f2f17bdfb494ba6 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -98,8 +98,8 @@
from yt.frontends.art.api import \
ARTStaticOutput, ARTFieldInfo, add_art_field
-from yt.frontends.maestro.api import \
- MaestroStaticOutput, MaestroFieldInfo, add_maestro_field
+#from yt.frontends.maestro.api import \
+# MaestroStaticOutput, MaestroFieldInfo, add_maestro_field
from yt.analysis_modules.list_modules import \
get_available_modules, amods
https://bitbucket.org/yt_analysis/yt-3.0/changeset/1bb45063e8a2/
changeset: 1bb45063e8a2
branch: yt
user: MatthewTurk
date: 2012-08-07 20:19:36
summary: Disabling Maestro imports for now.
affected #: 1 file
diff -r c98bd507540cd694876ec036d5ca2a270ecb88b1 -r 1bb45063e8a2fb18c18b095f37fd96aae422e7b3 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -98,8 +98,8 @@
from yt.frontends.art.api import \
ARTStaticOutput, ARTFieldInfo, add_art_field
-from yt.frontends.maestro.api import \
- MaestroStaticOutput, MaestroFieldInfo, add_maestro_field
+#from yt.frontends.maestro.api import \
+# MaestroStaticOutput, MaestroFieldInfo, add_maestro_field
from yt.analysis_modules.list_modules import \
get_available_modules, amods
https://bitbucket.org/yt_analysis/yt-3.0/changeset/a0d43ccae65d/
changeset: a0d43ccae65d
branch: yt
user: jzuhone
date: 2012-08-07 23:32:33
summary: Fix for FLASH 1-2D datasets. The shape of the bounding box is the same regardless of the code version, at least from 2.x to 4.x. This ensures that the grid boundaries for the non-represented dimensions will be set to the domain boundaries.
affected #: 1 file
diff -r 1bb45063e8a2fb18c18b095f37fd96aae422e7b3 -r a0d43ccae65d9bfe84532a9fb563ae0ab341c8c8 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -107,15 +107,9 @@
self.grid_left_edge[:,i] = DLE[i]
self.grid_right_edge[:,i] = DRE[i]
# We only go up to ND for 2D datasets
- if (f["/bounding box"][:,:,0].shape[1] == ND) :
- #FLASH 2/3 2D data
- self.grid_left_edge[:,:ND] = f["/bounding box"][:,:,0]
- self.grid_right_edge[:,:ND] = f["/bounding box"][:,:,1]
- else:
- self.grid_left_edge[:,:] = f["/bounding box"][:,:,0]
- self.grid_right_edge[:,:] = f["/bounding box"][:,:,1]
-
-
+ self.grid_left_edge[:,:ND] = f["/bounding box"][:,:ND,0]
+ self.grid_right_edge[:,:ND] = f["/bounding box"][:,:ND,1]
+
# Move this to the parameter file
try:
nxb = pf.parameters['nxb']
https://bitbucket.org/yt_analysis/yt-3.0/changeset/db26aea59f01/
changeset: db26aea59f01
branch: yt
user: sskory
date: 2012-08-08 01:04:14
summary: Updates for the fortran kdtree for changes in Forthon.
affected #: 2 files
diff -r a0d43ccae65d9bfe84532a9fb563ae0ab341c8c8 -r db26aea59f01d7f93e1ebeeef9b95bdde1e2ec90 yt/utilities/kdtree/Makefile
--- a/yt/utilities/kdtree/Makefile
+++ b/yt/utilities/kdtree/Makefile
@@ -9,9 +9,9 @@
endif
fKD: fKD.f90 fKD.v fKD_source.f90
-# Forthon --compile_first fKD_source --no2underscores --with-numpy -g fKD fKD.f90 fKD_source.f90
+# Forthon --compile_first fKD_source --no2underscores -g fKD fKD.f90 fKD_source.f90
@echo "Using $(FORTHON) ($(FORTHON_EXE))"
- $(FORTHON) -F gfortran --compile_first fKD_source --no2underscores --with-numpy --fopt "-O3" fKD fKD_source.f90
+ $(FORTHON) -F gfortran --compile_first fKD_source --no2underscores --fopt "-O3" fKD fKD_source.f90
clean:
rm -rf build fKDpy.a fKDpy.so
diff -r a0d43ccae65d9bfe84532a9fb563ae0ab341c8c8 -r db26aea59f01d7f93e1ebeeef9b95bdde1e2ec90 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -50,7 +50,12 @@
config.add_subpackage("answer_testing")
config.add_subpackage("delaunay") # From SciPy, written by Robert Kern
config.add_subpackage("kdtree")
+ # The two fKDpy.so entries below are for different versions of
+ # Forthon, which build the fKDpy.so object in different places
+ # depending on version. The newest versions (0.8.10+) uses the
+ # build/lib*/ directory.
config.add_data_files(('kdtree', ['kdtree/fKDpy.so']))
+ config.add_data_files(('kdtree', ['kdtree/build/lib*/fKDpy.so']))
config.add_subpackage("spatial")
config.add_subpackage("grid_data_format")
config.add_subpackage("parallel_tools")
https://bitbucket.org/yt_analysis/yt-3.0/changeset/1b4b1087f896/
changeset: 1b4b1087f896
branch: yt
user: sskory
date: 2012-08-08 01:35:21
summary: Adding Forthon to install_script.sh.
affected #: 1 file
diff -r db26aea59f01d7f93e1ebeeef9b95bdde1e2ec90 -r 1b4b1087f896a09c333ba6af654bdd7acdf1b2be doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -417,6 +417,7 @@
echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
+echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93 Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
# Individual processes
[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.7.tar.gz
@@ -437,6 +438,7 @@
get_ytproject h5py-2.0.1.tar.gz
get_ytproject Cython-0.16.tar.gz
get_ytproject reason-js-20120623.zip
+get_ytproject Forthon-0.8.10.tar.gz
if [ $INST_BZLIB -eq 1 ]
then
@@ -674,6 +676,7 @@
do_setup_py ipython-0.13
do_setup_py h5py-2.0.1
do_setup_py Cython-0.16
+do_setup_py Forthon-0.8.10
[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
https://bitbucket.org/yt_analysis/yt-3.0/changeset/d8afbb316be3/
changeset: d8afbb316be3
branch: yt
user: sskory
date: 2012-08-08 05:14:24
summary: Let's just move the fKDpy.so file back where it has always been after compilation.
affected #: 2 files
diff -r 1b4b1087f896a09c333ba6af654bdd7acdf1b2be -r d8afbb316be369e3105df0b3b090fffc56e89257 yt/utilities/kdtree/Makefile
--- a/yt/utilities/kdtree/Makefile
+++ b/yt/utilities/kdtree/Makefile
@@ -12,6 +12,7 @@
# Forthon --compile_first fKD_source --no2underscores -g fKD fKD.f90 fKD_source.f90
@echo "Using $(FORTHON) ($(FORTHON_EXE))"
$(FORTHON) -F gfortran --compile_first fKD_source --no2underscores --fopt "-O3" fKD fKD_source.f90
+ mv build/lib*/fKDpy.so .
clean:
rm -rf build fKDpy.a fKDpy.so
diff -r 1b4b1087f896a09c333ba6af654bdd7acdf1b2be -r d8afbb316be369e3105df0b3b090fffc56e89257 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -50,12 +50,7 @@
config.add_subpackage("answer_testing")
config.add_subpackage("delaunay") # From SciPy, written by Robert Kern
config.add_subpackage("kdtree")
- # The two fKDpy.so entries below are for different versions of
- # Forthon, which build the fKDpy.so object in different places
- # depending on version. The newest versions (0.8.10+) uses the
- # build/lib*/ directory.
config.add_data_files(('kdtree', ['kdtree/fKDpy.so']))
- config.add_data_files(('kdtree', ['kdtree/build/lib*/fKDpy.so']))
config.add_subpackage("spatial")
config.add_subpackage("grid_data_format")
config.add_subpackage("parallel_tools")
https://bitbucket.org/yt_analysis/yt-3.0/changeset/cf03334db6e8/
changeset: cf03334db6e8
branch: yt
user: samskillman
date: 2012-08-07 21:36:19
summary: Modifying set_zlim, set_cmap, and set_log to work with field='all' to apply to all plots. Also adding dynamic_range=None to set_zlim, which bases the dynamic range off of which of zmin,zmax is set to None. If neither are None, defaults to setting zmin = zmax / dynamic_range. Finally, removing some tabs in place of spaces, and updating a few docstrings.
affected #: 1 file
diff -r 1bb45063e8a2fb18c18b095f37fd96aae422e7b3 -r cf03334db6e8dfd8b20bd330c9437136441e7bb9 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -458,10 +458,15 @@
Log on/off.
"""
- if log:
- self._field_transform[field] = log_transform
+ if field == 'all':
+ fields = self.plots.keys()
else:
- self._field_transform[field] = linear_transform
+ fields = [field]
+ for field in fields:
+ if log:
+ self._field_transform[field] = log_transform
+ else:
+ self._field_transform[field] = linear_transform
@invalidate_plot
def set_transform(self, field, name):
@@ -472,34 +477,70 @@
@invalidate_plot
def set_cmap(self, field, cmap_name):
"""set the colormap for one of the fields
-
+
Parameters
----------
field : string
- the field to set a transform
+ the field to set the colormap
+ if field == 'all', applies to all plots.
cmap_name : string
name of the colormap
"""
- self._colorbar_valid = False
- self._colormaps[field] = cmap_name
+
+ if field is 'all':
+ fields = self.plots.keys()
+ else:
+ fields = [field]
+ for field in fields:
+ self._colorbar_valid = False
+ self._colormaps[field] = cmap_name
@invalidate_plot
- def set_zlim(self, field, zmin, zmax):
+ def set_zlim(self, field, zmin, zmax, dynamic_range=None):
"""set the scale of the colormap
-
+
Parameters
----------
field : string
- the field to set a transform
+ the field to set a colormap scale
+ if field == 'all', applies to all plots.
zmin : float
- the new minimum of the colormap scale
+ the new minimum of the colormap scale. If 'min', will
+ set to the minimum value in the current view.
zmax : float
- the new maximum of the colormap scale
+ the new maximum of the colormap scale. If 'max', will
+ set to the maximum value in the current view.
+
+ Keyword Parameters
+ ------------------
+ dynamic_range : float (default: None)
+ The dynamic range of the image.
+ If zmin == None, will set zmin = zmax / dynamic_range
+ If zmax == None, will set zmax = zmin * dynamic_range
+ When dynamic_range is specified, defaults to setting
+ zmin = zmax / dynamic_range.
"""
- self.plots[field].zmin = zmin
- self.plots[field].zmax = zmax
+ if field is 'all':
+ fields = self.plots.keys()
+ else:
+ fields = [field]
+ for field in fields:
+ myzmin = zmin
+ myzmax = zmax
+ if zmin == 'min':
+ myzmin = self.plots[field].image._A.min()
+ if zmax == 'max':
+ myzmax = self.plots[field].image._A.max()
+ if dynamic_range is not None:
+ if zmax is None:
+ myzmax = myzmin * dynamic_range
+ else:
+ myzmin = myzmax / dynamic_range
+
+ self.plots[field].zmin = myzmin
+ self.plots[field].zmax = myzmax
def setup_callbacks(self):
for key in callback_registry:
@@ -512,7 +553,7 @@
callback = invalidate_plot(apply_callback(CallbackMaker))
callback.__doc__ = CallbackMaker.__init__.__doc__
self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
-
+
def get_metadata(self, field, strip_mathml = True, return_string = True):
fval = self._frb[field]
mi = fval.min()
@@ -651,25 +692,32 @@
@invalidate_plot
def set_cmap(self, field, cmap):
"""set the colormap for one of the fields
-
+
Parameters
----------
field : string
the field to set a transform
+ if field == 'all', applies to all plots.
cmap_name : string
name of the colormap
"""
- self._colorbar_valid = False
- self._colormaps[field] = cmap
- if isinstance(cmap, types.StringTypes):
- if str(cmap) in yt_colormaps:
- cmap = yt_colormaps[str(cmap)]
- elif hasattr(matplotlib.cm, cmap):
- cmap = getattr(matplotlib.cm, cmap)
- if not is_colormap(cmap) and cmap is not None:
- raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
- self.plots[field].image.set_cmap(cmap)
+ if field == 'all':
+ fields = self.plots.keys()
+ else:
+ fields = [field]
+
+ for field in fields:
+ self._colorbar_valid = False
+ self._colormaps[field] = cmap
+ if isinstance(cmap, types.StringTypes):
+ if str(cmap) in yt_colormaps:
+ cmap = yt_colormaps[str(cmap)]
+ elif hasattr(matplotlib.cm, cmap):
+ cmap = getattr(matplotlib.cm, cmap)
+ if not is_colormap(cmap) and cmap is not None:
+ raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
+ self.plots[field].image.set_cmap(cmap)
def save(self,name=None):
"""saves the plot to disk.
@@ -762,7 +810,7 @@
the image centers on the location of the maximum density
cell. If set to 'c' or 'center', the plot is centered on
the middle of the domain.
- width : tuple or a float.
+ width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
@@ -781,7 +829,7 @@
the y axis. In the other two examples, code units are assumed, for example
(0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3
in code units.
- origin : string
+ origin : string
The location of the origin of the plot coordinate system.
Currently, can be set to three options: 'left-domain', corresponding
to the bottom-left hand corner of the simulation domain, 'center-domain',
@@ -830,7 +878,7 @@
the image centers on the location of the maximum density
cell. If set to 'c' or 'center', the plot is centered on
the middle of the domain.
- width : tuple or a float.
+ width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
https://bitbucket.org/yt_analysis/yt-3.0/changeset/5c1e00f021b0/
changeset: 5c1e00f021b0
branch: yt
user: ngoldbaum
date: 2012-08-08 10:09:11
summary: Merged in samskillman/yt (pull request #236)
affected #: 1 file
diff -r d8afbb316be369e3105df0b3b090fffc56e89257 -r 5c1e00f021b0fabb1261775853a79e75d08a8075 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -458,10 +458,15 @@
Log on/off.
"""
- if log:
- self._field_transform[field] = log_transform
+ if field == 'all':
+ fields = self.plots.keys()
else:
- self._field_transform[field] = linear_transform
+ fields = [field]
+ for field in fields:
+ if log:
+ self._field_transform[field] = log_transform
+ else:
+ self._field_transform[field] = linear_transform
@invalidate_plot
def set_transform(self, field, name):
@@ -472,34 +477,70 @@
@invalidate_plot
def set_cmap(self, field, cmap_name):
"""set the colormap for one of the fields
-
+
Parameters
----------
field : string
- the field to set a transform
+ the field to set the colormap
+ if field == 'all', applies to all plots.
cmap_name : string
name of the colormap
"""
- self._colorbar_valid = False
- self._colormaps[field] = cmap_name
+
+ if field is 'all':
+ fields = self.plots.keys()
+ else:
+ fields = [field]
+ for field in fields:
+ self._colorbar_valid = False
+ self._colormaps[field] = cmap_name
@invalidate_plot
- def set_zlim(self, field, zmin, zmax):
+ def set_zlim(self, field, zmin, zmax, dynamic_range=None):
"""set the scale of the colormap
-
+
Parameters
----------
field : string
- the field to set a transform
+ the field to set a colormap scale
+ if field == 'all', applies to all plots.
zmin : float
- the new minimum of the colormap scale
+ the new minimum of the colormap scale. If 'min', will
+ set to the minimum value in the current view.
zmax : float
- the new maximum of the colormap scale
+ the new maximum of the colormap scale. If 'max', will
+ set to the maximum value in the current view.
+
+ Keyword Parameters
+ ------------------
+ dynamic_range : float (default: None)
+ The dynamic range of the image.
+ If zmin == None, will set zmin = zmax / dynamic_range
+ If zmax == None, will set zmax = zmin * dynamic_range
+ When dynamic_range is specified, defaults to setting
+ zmin = zmax / dynamic_range.
"""
- self.plots[field].zmin = zmin
- self.plots[field].zmax = zmax
+ if field is 'all':
+ fields = self.plots.keys()
+ else:
+ fields = [field]
+ for field in fields:
+ myzmin = zmin
+ myzmax = zmax
+ if zmin == 'min':
+ myzmin = self.plots[field].image._A.min()
+ if zmax == 'max':
+ myzmax = self.plots[field].image._A.max()
+ if dynamic_range is not None:
+ if zmax is None:
+ myzmax = myzmin * dynamic_range
+ else:
+ myzmin = myzmax / dynamic_range
+
+ self.plots[field].zmin = myzmin
+ self.plots[field].zmax = myzmax
def setup_callbacks(self):
for key in callback_registry:
@@ -512,7 +553,7 @@
callback = invalidate_plot(apply_callback(CallbackMaker))
callback.__doc__ = CallbackMaker.__init__.__doc__
self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
-
+
def get_metadata(self, field, strip_mathml = True, return_string = True):
fval = self._frb[field]
mi = fval.min()
@@ -651,25 +692,32 @@
@invalidate_plot
def set_cmap(self, field, cmap):
"""set the colormap for one of the fields
-
+
Parameters
----------
field : string
the field to set a transform
+ if field == 'all', applies to all plots.
cmap_name : string
name of the colormap
"""
- self._colorbar_valid = False
- self._colormaps[field] = cmap
- if isinstance(cmap, types.StringTypes):
- if str(cmap) in yt_colormaps:
- cmap = yt_colormaps[str(cmap)]
- elif hasattr(matplotlib.cm, cmap):
- cmap = getattr(matplotlib.cm, cmap)
- if not is_colormap(cmap) and cmap is not None:
- raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
- self.plots[field].image.set_cmap(cmap)
+ if field == 'all':
+ fields = self.plots.keys()
+ else:
+ fields = [field]
+
+ for field in fields:
+ self._colorbar_valid = False
+ self._colormaps[field] = cmap
+ if isinstance(cmap, types.StringTypes):
+ if str(cmap) in yt_colormaps:
+ cmap = yt_colormaps[str(cmap)]
+ elif hasattr(matplotlib.cm, cmap):
+ cmap = getattr(matplotlib.cm, cmap)
+ if not is_colormap(cmap) and cmap is not None:
+ raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
+ self.plots[field].image.set_cmap(cmap)
def save(self,name=None):
"""saves the plot to disk.
@@ -762,7 +810,7 @@
the image centers on the location of the maximum density
cell. If set to 'c' or 'center', the plot is centered on
the middle of the domain.
- width : tuple or a float.
+ width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
@@ -781,7 +829,7 @@
the y axis. In the other two examples, code units are assumed, for example
(0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3
in code units.
- origin : string
+ origin : string
The location of the origin of the plot coordinate system.
Currently, can be set to three options: 'left-domain', corresponding
to the bottom-left hand corner of the simulation domain, 'center-domain',
@@ -830,7 +878,7 @@
the image centers on the location of the maximum density
cell. If set to 'c' or 'center', the plot is centered on
the middle of the domain.
- width : tuple or a float.
+ width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
https://bitbucket.org/yt_analysis/yt-3.0/changeset/b6c9c207677c/
changeset: b6c9c207677c
branch: yt
user: MatthewTurk
date: 2012-08-08 19:10:16
summary: Adding a load_uniform_grid function, which accepts a dict of data and then sets
up a StreamHandler.
affected #: 2 files
diff -r 5c1e00f021b0fabb1261775853a79e75d08a8075 -r b6c9c207677cbbd6e1c36d51189634797e002c6d yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -28,7 +28,8 @@
StreamGrid, \
StreamHierarchy, \
StreamStaticOutput, \
- StreamHandler
+ StreamHandler, \
+ load_uniform_grid
from .fields import \
KnownStreamFields, \
diff -r 5c1e00f021b0fabb1261775853a79e75d08a8075 -r b6c9c207677cbbd6e1c36d51189634797e002c6d yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -40,6 +40,8 @@
FieldInfoContainer, NullFunc
from yt.utilities.lib import \
get_box_grids_level
+from yt.utilities.definitions import \
+ mpc_conversion, sec_conversion
from .fields import \
StreamFieldInfo, \
@@ -288,3 +290,89 @@
@classmethod
def _is_valid(cls, *args, **kwargs):
return False
+
+class StreamDictFieldHandler(dict):
+
+ @property
+ def all_fields(self): return self[0].keys()
+
+def load_uniform_grid(data, domain_dimensions, domain_size_in_cm):
+ r"""Load a uniform grid of data into yt as a
+ :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+ This should allow a uniform grid of data to be loaded directly into yt and
+ analyzed as would any others. This comes with several caveats:
+ * Units will be incorrect unless the data has already been converted to
+ cgs.
+ * Some functions may behave oddly, and parallelism will be
+ disappointing or non-existent in most cases.
+ * Particles may be difficult to integrate.
+
+ Parameters
+ ----------
+ data : dict
+ This is a dict of numpy arrays, where the keys are the field names.
+ domain_dimensions : array_like
+ This is the domain dimensions of the grid
+ domain_size_in_cm : float
+ The size of the domain, in centimeters
+
+ Examples
+ --------
+
+ >>> arr = na.random.random((256, 256, 256))
+ >>> data = dict(Density = arr)
+ >>> pf = load_uniform_grid(data, [256, 256, 256], 3.08e24)
+ """
+ sfh = StreamDictFieldHandler()
+ sfh.update({0:data})
+ domain_dimensions = na.array(domain_dimensions)
+ if na.unique(domain_dimensions).size != 1:
+ print "We don't support variably sized domains yet."
+ raise RuntimeError
+ domain_left_edge = na.zeros(3, 'float64')
+ domain_right_edge = na.ones(3, 'float64')
+ grid_left_edges = na.zeros(3, "int64").reshape((1,3))
+ grid_right_edges = na.array(domain_dimensions, "int64").reshape((1,3))
+
+ grid_levels = na.array([0], dtype='int32').reshape((1,1))
+ grid_dimensions = grid_right_edges - grid_left_edges
+
+ grid_left_edges = grid_left_edges.astype("float64")
+ grid_left_edges /= domain_dimensions*2**grid_levels
+ grid_left_edges *= domain_right_edge - domain_left_edge
+ grid_left_edges += domain_left_edge
+
+ grid_right_edges = grid_right_edges.astype("float64")
+ grid_right_edges /= domain_dimensions*2**grid_levels
+ grid_right_edges *= domain_right_edge - domain_left_edge
+ grid_right_edges += domain_left_edge
+
+ handler = StreamHandler(
+ grid_left_edges,
+ grid_right_edges,
+ grid_dimensions,
+ grid_levels,
+ na.array([-1], dtype='int64'),
+ na.zeros(1, dtype='int64').reshape((1,1)),
+ na.zeros(1).reshape((1,1)),
+ sfh,
+ )
+
+ handler.name = "UniformGridData"
+ handler.domain_left_edge = domain_left_edge
+ handler.domain_right_edge = domain_right_edge
+ handler.refine_by = 2
+ handler.dimensionality = 3
+ handler.domain_dimensions = domain_dimensions
+ handler.simulation_time = 0.0
+ handler.cosmology_simulation = 0
+
+ spf = StreamStaticOutput(handler)
+ spf.units["cm"] = domain_size_in_cm
+ spf.units['1'] = 1.0
+ spf.units["unitary"] = 1.0
+ box_in_mpc = domain_size_in_cm / mpc_conversion['cm']
+ for unit in mpc_conversion.keys():
+ spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+ return spf
https://bitbucket.org/yt_analysis/yt-3.0/changeset/eb4afaa52375/
changeset: eb4afaa52375
branch: yt
user: MatthewTurk
date: 2012-08-09 22:26:27
summary: Fixing docstring in to_frb
affected #: 1 file
diff -r b6c9c207677cbbd6e1c36d51189634797e002c6d -r eb4afaa5237564d5e0fdd3df1b848684a4cb5000 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -871,9 +871,12 @@
units of the simulation, or a tuple of the (value, unit) style.
This will be the width of the FRB.
height : height specifier
- This will be the height of the FRB, by default it is equal to width.
+ This will be the physical height of the FRB, by default it is equal
+ to width. Note that this will not make any corrections to
+ resolution for the aspect ratio.
resolution : int or tuple of ints
- The number of pixels on a side of the final FRB.
+ The number of pixels on a side of the final FRB. If iterable, this
+ will be the width then the height.
center : array-like of floats, optional
The center of the FRB. If not specified, defaults to the center of
the current object.
https://bitbucket.org/yt_analysis/yt-3.0/changeset/5a76991a2229/
changeset: 5a76991a2229
branch: yt
user: jzuhone
date: 2012-08-11 22:54:12
summary: Adding "sim_time" and "number_of_particles" as optional arguments to load_uniform_grid. With these we can add a simulation time and particles to the uniform grid.
affected #: 1 file
diff -r eb4afaa5237564d5e0fdd3df1b848684a4cb5000 -r 5a76991a222984e6269a0a2a7dac3eee2c1c237b yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -40,8 +40,6 @@
FieldInfoContainer, NullFunc
from yt.utilities.lib import \
get_box_grids_level
-from yt.utilities.definitions import \
- mpc_conversion, sec_conversion
from .fields import \
StreamFieldInfo, \
@@ -290,89 +288,3 @@
@classmethod
def _is_valid(cls, *args, **kwargs):
return False
-
-class StreamDictFieldHandler(dict):
-
- @property
- def all_fields(self): return self[0].keys()
-
-def load_uniform_grid(data, domain_dimensions, domain_size_in_cm):
- r"""Load a uniform grid of data into yt as a
- :class:`~yt.frontends.stream.data_structures.StreamHandler`.
-
- This should allow a uniform grid of data to be loaded directly into yt and
- analyzed as would any others. This comes with several caveats:
- * Units will be incorrect unless the data has already been converted to
- cgs.
- * Some functions may behave oddly, and parallelism will be
- disappointing or non-existent in most cases.
- * Particles may be difficult to integrate.
-
- Parameters
- ----------
- data : dict
- This is a dict of numpy arrays, where the keys are the field names.
- domain_dimensions : array_like
- This is the domain dimensions of the grid
- domain_size_in_cm : float
- The size of the domain, in centimeters
-
- Examples
- --------
-
- >>> arr = na.random.random((256, 256, 256))
- >>> data = dict(Density = arr)
- >>> pf = load_uniform_grid(data, [256, 256, 256], 3.08e24)
- """
- sfh = StreamDictFieldHandler()
- sfh.update({0:data})
- domain_dimensions = na.array(domain_dimensions)
- if na.unique(domain_dimensions).size != 1:
- print "We don't support variably sized domains yet."
- raise RuntimeError
- domain_left_edge = na.zeros(3, 'float64')
- domain_right_edge = na.ones(3, 'float64')
- grid_left_edges = na.zeros(3, "int64").reshape((1,3))
- grid_right_edges = na.array(domain_dimensions, "int64").reshape((1,3))
-
- grid_levels = na.array([0], dtype='int32').reshape((1,1))
- grid_dimensions = grid_right_edges - grid_left_edges
-
- grid_left_edges = grid_left_edges.astype("float64")
- grid_left_edges /= domain_dimensions*2**grid_levels
- grid_left_edges *= domain_right_edge - domain_left_edge
- grid_left_edges += domain_left_edge
-
- grid_right_edges = grid_right_edges.astype("float64")
- grid_right_edges /= domain_dimensions*2**grid_levels
- grid_right_edges *= domain_right_edge - domain_left_edge
- grid_right_edges += domain_left_edge
-
- handler = StreamHandler(
- grid_left_edges,
- grid_right_edges,
- grid_dimensions,
- grid_levels,
- na.array([-1], dtype='int64'),
- na.zeros(1, dtype='int64').reshape((1,1)),
- na.zeros(1).reshape((1,1)),
- sfh,
- )
-
- handler.name = "UniformGridData"
- handler.domain_left_edge = domain_left_edge
- handler.domain_right_edge = domain_right_edge
- handler.refine_by = 2
- handler.dimensionality = 3
- handler.domain_dimensions = domain_dimensions
- handler.simulation_time = 0.0
- handler.cosmology_simulation = 0
-
- spf = StreamStaticOutput(handler)
- spf.units["cm"] = domain_size_in_cm
- spf.units['1'] = 1.0
- spf.units["unitary"] = 1.0
- box_in_mpc = domain_size_in_cm / mpc_conversion['cm']
- for unit in mpc_conversion.keys():
- spf.units[unit] = mpc_conversion[unit] * box_in_mpc
- return spf
https://bitbucket.org/yt_analysis/yt-3.0/changeset/76eb9d59b2ec/
changeset: 76eb9d59b2ec
branch: yt
user: jzuhone
date: 2012-08-11 23:41:48
summary: This commit ACTUALLY gives us the changes we want
affected #: 1 file
diff -r 5a76991a222984e6269a0a2a7dac3eee2c1c237b -r 76eb9d59b2ececb409cea5775ae7df29522c336a yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -40,6 +40,8 @@
FieldInfoContainer, NullFunc
from yt.utilities.lib import \
get_box_grids_level
+from yt.utilities.definitions import \
+ mpc_conversion, sec_conversion
from .fields import \
StreamFieldInfo, \
@@ -288,3 +290,95 @@
@classmethod
def _is_valid(cls, *args, **kwargs):
return False
+
+class StreamDictFieldHandler(dict):
+
+ @property
+ def all_fields(self): return self[0].keys()
+
+def load_uniform_grid(data, domain_dimensions, domain_size_in_cm,
+ sim_time=0.0, number_of_particles=0):
+ r"""Load a uniform grid of data into yt as a
+ :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+ This should allow a uniform grid of data to be loaded directly into yt and
+ analyzed as would any others. This comes with several caveats:
+ * Units will be incorrect unless the data has already been converted to
+ cgs.
+ * Some functions may behave oddly, and parallelism will be
+ disappointing or non-existent in most cases.
+ * Particles may be difficult to integrate.
+
+ Parameters
+ ----------
+ data : dict
+ This is a dict of numpy arrays, where the keys are the field names.
+ domain_dimensions : array_like
+ This is the domain dimensions of the grid
+ domain_size_in_cm : float
+ The size of the domain, in centimeters
+ sim_time : float, optional
+ The simulation time in seconds
+ number_of_particles : int, optional
+ If particle fields are included, set this to the number of particles
+
+ Examples
+ --------
+
+ >>> arr = na.random.random((256, 256, 256))
+ >>> data = dict(Density = arr)
+ >>> pf = load_uniform_grid(data, [256, 256, 256], 3.08e24)
+
+ """
+ sfh = StreamDictFieldHandler()
+ sfh.update({0:data})
+ domain_dimensions = na.array(domain_dimensions)
+ if na.unique(domain_dimensions).size != 1:
+ print "We don't support variably sized domains yet."
+ raise RuntimeError
+ domain_left_edge = na.zeros(3, 'float64')
+ domain_right_edge = na.ones(3, 'float64')
+ grid_left_edges = na.zeros(3, "int64").reshape((1,3))
+ grid_right_edges = na.array(domain_dimensions, "int64").reshape((1,3))
+
+ grid_levels = na.array([0], dtype='int32').reshape((1,1))
+ grid_dimensions = grid_right_edges - grid_left_edges
+
+ grid_left_edges = grid_left_edges.astype("float64")
+ grid_left_edges /= domain_dimensions*2**grid_levels
+ grid_left_edges *= domain_right_edge - domain_left_edge
+ grid_left_edges += domain_left_edge
+
+ grid_right_edges = grid_right_edges.astype("float64")
+ grid_right_edges /= domain_dimensions*2**grid_levels
+ grid_right_edges *= domain_right_edge - domain_left_edge
+ grid_right_edges += domain_left_edge
+
+ handler = StreamHandler(
+ grid_left_edges,
+ grid_right_edges,
+ grid_dimensions,
+ grid_levels,
+ na.array([-1], dtype='int64'),
+ number_of_particles*na.ones(1, dtype='int64').reshape((1,1)),
+ na.zeros(1).reshape((1,1)),
+ sfh,
+ )
+
+ handler.name = "UniformGridData"
+ handler.domain_left_edge = domain_left_edge
+ handler.domain_right_edge = domain_right_edge
+ handler.refine_by = 2
+ handler.dimensionality = 3
+ handler.domain_dimensions = domain_dimensions
+ handler.simulation_time = sim_time
+ handler.cosmology_simulation = 0
+
+ spf = StreamStaticOutput(handler)
+ spf.units["cm"] = domain_size_in_cm
+ spf.units['1'] = 1.0
+ spf.units["unitary"] = 1.0
+ box_in_mpc = domain_size_in_cm / mpc_conversion['cm']
+ for unit in mpc_conversion.keys():
+ spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+ return spf
https://bitbucket.org/yt_analysis/yt-3.0/changeset/16f5ecd96fab/
changeset: 16f5ecd96fab
branch: yt
user: astrofrog
date: 2012-08-13 12:25:08
summary: Added cmin/cmax options for plot_allsky_healpix
affected #: 1 file
diff -r 76eb9d59b2ececb409cea5775ae7df29522c336a -r 16f5ecd96fabc021697b84c566a77ec729348418 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1495,7 +1495,7 @@
return image[:,0,0]
def plot_allsky_healpix(image, nside, fn, label = "", rotation = None,
- take_log = True, resolution=512):
+ take_log = True, resolution=512, cmin=None, cmax=None):
import matplotlib.figure
import matplotlib.backends.backend_agg
if rotation is None: rotation = na.eye(3).astype("float64")
@@ -1507,7 +1507,7 @@
if take_log: func = na.log10
else: func = lambda a: a
implot = ax.imshow(func(img), extent=(-na.pi,na.pi,-na.pi/2,na.pi/2),
- clip_on=False, aspect=0.5)
+ clip_on=False, aspect=0.5, vmin=cmin, vmax=cmax)
cb = fig.colorbar(implot, orientation='horizontal')
cb.set_label(label)
ax.xaxis.set_ticks(())
https://bitbucket.org/yt_analysis/yt-3.0/changeset/843cf599c0a0/
changeset: 843cf599c0a0
branch: yt
user: jwise77
date: 2012-07-30 08:56:06
summary: Adding Renyue's fit for Chandra emissivity.
affected #: 1 file
diff -r 31d1c069f5a6e880f68415c2bd738ee9eed86787 -r 843cf599c0a0caf3ede3f7c7d36491bcfc203523 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -384,6 +384,36 @@
function=_CellVolume,
convert_function=_ConvertCellVolumeCGS)
+def _ChandraEmissivity(field, data):
+ logT0 = na.log10(data["Temperature"]) - 7
+ return ((data["NumberDensity"].astype('float64')**2.0) \
+ *(10**(-0.0103*logT0**8 \
+ +0.0417*logT0**7 \
+ -0.0636*logT0**6 \
+ +0.1149*logT0**5 \
+ -0.3151*logT0**4 \
+ +0.6655*logT0**3 \
+ -1.1256*logT0**2 \
+ +1.0026*logT0**1 \
+ -0.6984*logT0) \
+ +data["Metallicity"]*10**(0.0305*logT0**11 \
+ -0.0045*logT0**10 \
+ -0.3620*logT0**9 \
+ +0.0513*logT0**8 \
+ +1.6669*logT0**7 \
+ -0.3854*logT0**6 \
+ -3.3604*logT0**5 \
+ +0.4728*logT0**4 \
+ +4.5774*logT0**3 \
+ -2.3661*logT0**2 \
+ -1.6667*logT0**1 \
+ -0.2193*logT0)))
+def _convertChandraEmissivity(data):
+ return 1.0 #1.0e-23*0.76**2
+add_field("ChandraEmissivity", function=_ChandraEmissivity,
+ convert_function=_convertChandraEmissivity,
+ projection_conversion="1")
+
def _XRayEmissivity(field, data):
return ((data["Density"].astype('float64')**2.0) \
*data["Temperature"]**0.5)
https://bitbucket.org/yt_analysis/yt-3.0/changeset/b942a8fbf46e/
changeset: b942a8fbf46e
branch: yt
user: jwise77
date: 2012-07-30 08:56:52
summary: Adding total gas mass and separate total particle and gas mass derived quantities.
affected #: 1 file
diff -r 843cf599c0a0caf3ede3f7c7d36491bcfc203523 -r b942a8fbf46eaa8b8e4e844f2c38cddc349ca6ca yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -156,7 +156,32 @@
def _combTotalMass(data, total_mass):
return total_mass.sum()
add_quantity("TotalMass", function=_TotalMass,
- combine_function=_combTotalMass, n_ret=1)
+ combine_function=_combTotalMass, n_ret = 1)
+
+def _TotalGasMass(data):
+ """
+ This function takes no arguments and returns the sum of cell
+ masses in the object.
+ """
+ baryon_mass = data["CellMassMsun"].sum()
+ return [baryon_mass]
+def _combTotalGasMass(data, baryon_mass):
+ return baryon_mass.sum()
+add_quantity("TotalGasMass", function=_TotalGasMass,
+ combine_function=_combTotalGasMass, n_ret = 1)
+
+def _MatterMass(data):
+ """
+ This function takes no arguments and returns the array sum of cell masses
+ and particle masses.
+ """
+ cellvol = data["CellVolume"]
+ matter_rho = data["Matter_Density"]
+ return cellvol, matter_rho
+def _combMatterMass(data, cellvol, matter_rho):
+ return cellvol*matter_rho
+add_quantity("MatterMass", function=_MatterMass,
+ combine_function=_combMatterMass, n_ret=2)
def _CenterOfMass(data, use_cells=True, use_particles=False):
"""
https://bitbucket.org/yt_analysis/yt-3.0/changeset/80c99f3841a3/
changeset: 80c99f3841a3
branch: yt
user: jwise77
date: 2012-07-30 08:57:19
summary: Adding option to specify colorbar label for HEALPix camera image.
affected #: 1 file
diff -r b942a8fbf46eaa8b8e4e844f2c38cddc349ca6ca -r 80c99f3841a3c23dba4357e9fd494540fda185fa yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -793,7 +793,7 @@
return image
- def save_image(self, fn, clim, image):
+ def save_image(self, fn, clim, image, label=None):
if self.comm.rank is 0 and fn is not None:
# This assumes Density; this is a relatively safe assumption.
import matplotlib.figure
@@ -807,7 +807,11 @@
ax = fig.add_subplot(1,1,1,projection='hammer')
implot = ax.imshow(img, extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
cb = fig.colorbar(implot, orientation='horizontal')
- cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+
+ if label == None:
+ cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+ else:
+ cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[%s]$" % units)
if clim is not None: cb.set_clim(*clim)
ax.xaxis.set_ticks(())
ax.yaxis.set_ticks(())
https://bitbucket.org/yt_analysis/yt-3.0/changeset/c4c575a49599/
changeset: c4c575a49599
branch: yt
user: jwise77
date: 2012-08-06 14:25:40
summary: If no stars are left for star_analysis, return. Fixes to some B-field
labels.
affected #: 2 files
diff -r 0fae6c70a51b683546489137ac61184b3c11b39a -r c4c575a4959964a2d113247bec4de08ad383724a yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -393,6 +393,7 @@
dt = na.maximum(dt, 0.0)
# Remove young stars
sub = dt >= self.min_age
+ if len(sub) == 0: return
self.star_metal = self.star_metal[sub]
dt = dt[sub]
self.star_creation_time = self.star_creation_time[sub]
diff -r 0fae6c70a51b683546489137ac61184b3c11b39a -r c4c575a4959964a2d113247bec4de08ad383724a yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -171,16 +171,16 @@
# We set up fields for both TotalEnergy and Total_Energy in the known fields
# lists. Note that this does not mean these will be the used definitions.
add_enzo_field("TotalEnergy", function=NullFunc,
- display_name = "\mathrm{Total}\/\mathrm{Energy}",
+ display_name = "\rm{Total}\/\rm{Energy}",
units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
add_enzo_field("Total_Energy", function=NullFunc,
- display_name = "\mathrm{Total}\/\mathrm{Energy}",
+ display_name = "\rm{Total}\/\rm{Energy}",
units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
def _Total_Energy(field, data):
return data["TotalEnergy"] / _convertEnergy(data)
add_field("Total_Energy", function=_Total_Energy,
- display_name = "\mathrm{Total}\/\mathrm{Energy}",
+ display_name = "\rm{Total}\/\rm{Energy}",
units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
def _NumberDensity(field, data):
@@ -247,7 +247,7 @@
for field in ['Bx','By','Bz']:
f = KnownEnzoFields[field]
f._convert_function=_convertBfield
- f._units=r"\mathrm{Gau\ss}"
+ f._units=r"\rm{Gauss}"
f.take_log=False
def _convertRadiation(data):
@@ -447,14 +447,14 @@
return data['star_creation_time']
def _ConvertEnzoTimeYears(data):
return data.pf.time_units['years']
-add_field('StarCreationTimeYears', units=r"\mathrm{yr}",
+add_field('StarCreationTimeYears', units=r"\rm{yr}",
function=_StarCreationTime,
convert_function=_ConvertEnzoTimeYears,
projection_conversion="1")
def _StarDynamicalTime(field, data):
return data['star_dynamical_time']
-add_field('StarDynamicalTimeYears', units=r"\mathrm{yr}",
+add_field('StarDynamicalTimeYears', units=r"\rm{yr}",
function=_StarDynamicalTime,
convert_function=_ConvertEnzoTimeYears,
projection_conversion="1")
@@ -466,7 +466,7 @@
data.pf.current_time - \
data['StarCreationTimeYears'][with_stars]
return star_age
-add_field('StarAgeYears', units=r"\mathrm{yr}",
+add_field('StarAgeYears', units=r"\rm{yr}",
function=_StarAge,
projection_conversion="1")
@@ -476,20 +476,12 @@
add_field('IsStarParticle', function=_IsStarParticle,
particle_type = True)
-def _convertBfield(data):
- return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
-for field in ['Bx','By','Bz']:
- f = KnownEnzoFields[field]
- f._convert_function=_convertBfield
- f._units=r"\mathrm{Gauss}"
- f.take_log=False
-
def _Bmag(field, data):
""" magnitude of bvec
"""
return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\mathrm{Gauss}")
+add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\rm{Gauss}")
# Particle functions
@@ -645,17 +637,3 @@
add_enzo_1d_field("z-velocity", function=_zvel)
add_enzo_1d_field("y-velocity", function=_yvel)
-def _convertBfield(data):
- return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
-for field in ['Bx','By','Bz']:
- f = KnownEnzoFields[field]
- f._convert_function=_convertBfield
- f._units=r"\mathrm{Gauss}"
- f.take_log=False
-
-def _Bmag(field, data):
- """ magnitude of bvec
- """
- return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
-
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\mathrm{Gauss}")
https://bitbucket.org/yt_analysis/yt-3.0/changeset/c949e94a5ea7/
changeset: c949e94a5ea7
branch: yt
user: jwise77
date: 2012-08-06 14:35:14
summary: Merging.
affected #: 3 files
diff -r c4c575a4959964a2d113247bec4de08ad383724a -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -156,7 +156,32 @@
def _combTotalMass(data, total_mass):
return total_mass.sum()
add_quantity("TotalMass", function=_TotalMass,
- combine_function=_combTotalMass, n_ret=1)
+ combine_function=_combTotalMass, n_ret = 1)
+
+def _TotalGasMass(data):
+ """
+ This function takes no arguments and returns the sum of cell
+ masses in the object.
+ """
+ baryon_mass = data["CellMassMsun"].sum()
+ return [baryon_mass]
+def _combTotalGasMass(data, baryon_mass):
+ return baryon_mass.sum()
+add_quantity("TotalGasMass", function=_TotalGasMass,
+ combine_function=_combTotalGasMass, n_ret = 1)
+
+def _MatterMass(data):
+ """
+ This function takes no arguments and returns the array sum of cell masses
+ and particle masses.
+ """
+ cellvol = data["CellVolume"]
+ matter_rho = data["Matter_Density"]
+ return cellvol, matter_rho
+def _combMatterMass(data, cellvol, matter_rho):
+ return cellvol*matter_rho
+add_quantity("MatterMass", function=_MatterMass,
+ combine_function=_combMatterMass, n_ret=2)
def _CenterOfMass(data, use_cells=True, use_particles=False):
"""
diff -r c4c575a4959964a2d113247bec4de08ad383724a -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -388,6 +388,36 @@
function=_CellVolume,
convert_function=_ConvertCellVolumeCGS)
+def _ChandraEmissivity(field, data):
+ logT0 = na.log10(data["Temperature"]) - 7
+ return ((data["NumberDensity"].astype('float64')**2.0) \
+ *(10**(-0.0103*logT0**8 \
+ +0.0417*logT0**7 \
+ -0.0636*logT0**6 \
+ +0.1149*logT0**5 \
+ -0.3151*logT0**4 \
+ +0.6655*logT0**3 \
+ -1.1256*logT0**2 \
+ +1.0026*logT0**1 \
+ -0.6984*logT0) \
+ +data["Metallicity"]*10**(0.0305*logT0**11 \
+ -0.0045*logT0**10 \
+ -0.3620*logT0**9 \
+ +0.0513*logT0**8 \
+ +1.6669*logT0**7 \
+ -0.3854*logT0**6 \
+ -3.3604*logT0**5 \
+ +0.4728*logT0**4 \
+ +4.5774*logT0**3 \
+ -2.3661*logT0**2 \
+ -1.6667*logT0**1 \
+ -0.2193*logT0)))
+def _convertChandraEmissivity(data):
+ return 1.0 #1.0e-23*0.76**2
+add_field("ChandraEmissivity", function=_ChandraEmissivity,
+ convert_function=_convertChandraEmissivity,
+ projection_conversion="1")
+
def _XRayEmissivity(field, data):
return ((data["Density"].astype('float64')**2.0) \
*data["Temperature"]**0.5)
diff -r c4c575a4959964a2d113247bec4de08ad383724a -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -818,7 +818,7 @@
self.save_image(fn, clim, image)
return image
- def save_image(self, fn, clim, image):
+ def save_image(self, fn, clim, image, label=None):
if self.comm.rank is 0 and fn is not None:
# This assumes Density; this is a relatively safe assumption.
import matplotlib.figure
@@ -832,7 +832,11 @@
ax = fig.add_subplot(1,1,1,projection='hammer')
implot = ax.imshow(img, extent=(-na.pi,na.pi,-na.pi/2,na.pi/2), clip_on=False, aspect=0.5)
cb = fig.colorbar(implot, orientation='horizontal')
- cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+
+ if label == None:
+ cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+ else:
+ cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[%s]$" % units)
if clim is not None: cb.set_clim(*clim)
ax.xaxis.set_ticks(())
ax.yaxis.set_ticks(())
https://bitbucket.org/yt_analysis/yt-3.0/changeset/4ae1fd5367e4/
changeset: 4ae1fd5367e4
branch: yt
user: jwise77
date: 2012-08-10 14:33:19
summary: Merging
affected #: 9 files
diff -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -201,6 +201,12 @@
echo "$ export CC=gcc-4.2"
echo "$ export CXX=g++-4.2"
echo
+ OSX_VERSION=`sw_vers -productVersion`
+ if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
+ then
+ MPL_SUPP_CFLAGS="${MPL_SUPP_CFLAGS} -mmacosx-version-min=10.7"
+ MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
+ fi
fi
if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
then
@@ -411,6 +417,7 @@
echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
+echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93 Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
# Individual processes
[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.7.tar.gz
@@ -431,6 +438,7 @@
get_ytproject h5py-2.0.1.tar.gz
get_ytproject Cython-0.16.tar.gz
get_ytproject reason-js-20120623.zip
+get_ytproject Forthon-0.8.10.tar.gz
if [ $INST_BZLIB -eq 1 ]
then
@@ -668,6 +676,7 @@
do_setup_py ipython-0.13
do_setup_py h5py-2.0.1
do_setup_py Cython-0.16
+do_setup_py Forthon-0.8.10
[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
diff -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -871,9 +871,12 @@
units of the simulation, or a tuple of the (value, unit) style.
This will be the width of the FRB.
height : height specifier
- This will be the height of the FRB, by default it is equal to width.
+ This will be the physical height of the FRB, by default it is equal
+ to width. Note that this will not make any corrections to
+ resolution for the aspect ratio.
resolution : int or tuple of ints
- The number of pixels on a side of the final FRB.
+ The number of pixels on a side of the final FRB. If iterable, this
+ will be the width then the height.
center : array-like of floats, optional
The center of the FRB. If not specified, defaults to the center of
the current object.
diff -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -107,15 +107,9 @@
self.grid_left_edge[:,i] = DLE[i]
self.grid_right_edge[:,i] = DRE[i]
# We only go up to ND for 2D datasets
- if (f["/bounding box"][:,:,0].shape[1] == ND) :
- #FLASH 2/3 2D data
- self.grid_left_edge[:,:ND] = f["/bounding box"][:,:,0]
- self.grid_right_edge[:,:ND] = f["/bounding box"][:,:,1]
- else:
- self.grid_left_edge[:,:] = f["/bounding box"][:,:,0]
- self.grid_right_edge[:,:] = f["/bounding box"][:,:,1]
-
-
+ self.grid_left_edge[:,:ND] = f["/bounding box"][:,:ND,0]
+ self.grid_right_edge[:,:ND] = f["/bounding box"][:,:ND,1]
+
# Move this to the parameter file
try:
nxb = pf.parameters['nxb']
diff -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -28,7 +28,8 @@
StreamGrid, \
StreamHierarchy, \
StreamStaticOutput, \
- StreamHandler
+ StreamHandler, \
+ load_uniform_grid
from .fields import \
KnownStreamFields, \
diff -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -40,6 +40,8 @@
FieldInfoContainer, NullFunc
from yt.utilities.lib import \
get_box_grids_level
+from yt.utilities.definitions import \
+ mpc_conversion, sec_conversion
from .fields import \
StreamFieldInfo, \
@@ -288,3 +290,89 @@
@classmethod
def _is_valid(cls, *args, **kwargs):
return False
+
+class StreamDictFieldHandler(dict):
+
+ @property
+ def all_fields(self): return self[0].keys()
+
+def load_uniform_grid(data, domain_dimensions, domain_size_in_cm):
+ r"""Load a uniform grid of data into yt as a
+ :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+ This should allow a uniform grid of data to be loaded directly into yt and
+ analyzed as would any others. This comes with several caveats:
+ * Units will be incorrect unless the data has already been converted to
+ cgs.
+ * Some functions may behave oddly, and parallelism will be
+ disappointing or non-existent in most cases.
+ * Particles may be difficult to integrate.
+
+ Parameters
+ ----------
+ data : dict
+ This is a dict of numpy arrays, where the keys are the field names.
+ domain_dimensions : array_like
+ This is the domain dimensions of the grid
+ domain_size_in_cm : float
+ The size of the domain, in centimeters
+
+ Examples
+ --------
+
+ >>> arr = na.random.random((256, 256, 256))
+ >>> data = dict(Density = arr)
+ >>> pf = load_uniform_grid(data, [256, 256, 256], 3.08e24)
+ """
+ sfh = StreamDictFieldHandler()
+ sfh.update({0:data})
+ domain_dimensions = na.array(domain_dimensions)
+ if na.unique(domain_dimensions).size != 1:
+ print "We don't support variably sized domains yet."
+ raise RuntimeError
+ domain_left_edge = na.zeros(3, 'float64')
+ domain_right_edge = na.ones(3, 'float64')
+ grid_left_edges = na.zeros(3, "int64").reshape((1,3))
+ grid_right_edges = na.array(domain_dimensions, "int64").reshape((1,3))
+
+ grid_levels = na.array([0], dtype='int32').reshape((1,1))
+ grid_dimensions = grid_right_edges - grid_left_edges
+
+ grid_left_edges = grid_left_edges.astype("float64")
+ grid_left_edges /= domain_dimensions*2**grid_levels
+ grid_left_edges *= domain_right_edge - domain_left_edge
+ grid_left_edges += domain_left_edge
+
+ grid_right_edges = grid_right_edges.astype("float64")
+ grid_right_edges /= domain_dimensions*2**grid_levels
+ grid_right_edges *= domain_right_edge - domain_left_edge
+ grid_right_edges += domain_left_edge
+
+ handler = StreamHandler(
+ grid_left_edges,
+ grid_right_edges,
+ grid_dimensions,
+ grid_levels,
+ na.array([-1], dtype='int64'),
+ na.zeros(1, dtype='int64').reshape((1,1)),
+ na.zeros(1).reshape((1,1)),
+ sfh,
+ )
+
+ handler.name = "UniformGridData"
+ handler.domain_left_edge = domain_left_edge
+ handler.domain_right_edge = domain_right_edge
+ handler.refine_by = 2
+ handler.dimensionality = 3
+ handler.domain_dimensions = domain_dimensions
+ handler.simulation_time = 0.0
+ handler.cosmology_simulation = 0
+
+ spf = StreamStaticOutput(handler)
+ spf.units["cm"] = domain_size_in_cm
+ spf.units['1'] = 1.0
+ spf.units["unitary"] = 1.0
+ box_in_mpc = domain_size_in_cm / mpc_conversion['cm']
+ for unit in mpc_conversion.keys():
+ spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+ return spf
diff -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -98,8 +98,8 @@
from yt.frontends.art.api import \
ARTStaticOutput, ARTFieldInfo, add_art_field
-from yt.frontends.maestro.api import \
- MaestroStaticOutput, MaestroFieldInfo, add_maestro_field
+#from yt.frontends.maestro.api import \
+# MaestroStaticOutput, MaestroFieldInfo, add_maestro_field
from yt.analysis_modules.list_modules import \
get_available_modules, amods
diff -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac yt/utilities/kdtree/Makefile
--- a/yt/utilities/kdtree/Makefile
+++ b/yt/utilities/kdtree/Makefile
@@ -9,9 +9,10 @@
endif
fKD: fKD.f90 fKD.v fKD_source.f90
-# Forthon --compile_first fKD_source --no2underscores --with-numpy -g fKD fKD.f90 fKD_source.f90
+# Forthon --compile_first fKD_source --no2underscores -g fKD fKD.f90 fKD_source.f90
@echo "Using $(FORTHON) ($(FORTHON_EXE))"
- $(FORTHON) -F gfortran --compile_first fKD_source --no2underscores --with-numpy --fopt "-O3" fKD fKD_source.f90
+ $(FORTHON) -F gfortran --compile_first fKD_source --no2underscores --fopt "-O3" fKD fKD_source.f90
+ mv build/lib*/fKDpy.so .
clean:
rm -rf build fKDpy.a fKDpy.so
diff -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -458,10 +458,15 @@
Log on/off.
"""
- if log:
- self._field_transform[field] = log_transform
+ if field == 'all':
+ fields = self.plots.keys()
else:
- self._field_transform[field] = linear_transform
+ fields = [field]
+ for field in fields:
+ if log:
+ self._field_transform[field] = log_transform
+ else:
+ self._field_transform[field] = linear_transform
@invalidate_plot
def set_transform(self, field, name):
@@ -472,34 +477,70 @@
@invalidate_plot
def set_cmap(self, field, cmap_name):
"""set the colormap for one of the fields
-
+
Parameters
----------
field : string
- the field to set a transform
+ the field to set the colormap
+ if field == 'all', applies to all plots.
cmap_name : string
name of the colormap
"""
- self._colorbar_valid = False
- self._colormaps[field] = cmap_name
+
+ if field is 'all':
+ fields = self.plots.keys()
+ else:
+ fields = [field]
+ for field in fields:
+ self._colorbar_valid = False
+ self._colormaps[field] = cmap_name
@invalidate_plot
- def set_zlim(self, field, zmin, zmax):
+ def set_zlim(self, field, zmin, zmax, dynamic_range=None):
"""set the scale of the colormap
-
+
Parameters
----------
field : string
- the field to set a transform
+ the field to set a colormap scale
+ if field == 'all', applies to all plots.
zmin : float
- the new minimum of the colormap scale
+ the new minimum of the colormap scale. If 'min', will
+ set to the minimum value in the current view.
zmax : float
- the new maximum of the colormap scale
+ the new maximum of the colormap scale. If 'max', will
+ set to the maximum value in the current view.
+
+ Keyword Parameters
+ ------------------
+ dynamic_range : float (default: None)
+ The dynamic range of the image.
+ If zmin == None, will set zmin = zmax / dynamic_range
+ If zmax == None, will set zmax = zmin * dynamic_range
+ When dynamic_range is specified, defaults to setting
+ zmin = zmax / dynamic_range.
"""
- self.plots[field].zmin = zmin
- self.plots[field].zmax = zmax
+ if field is 'all':
+ fields = self.plots.keys()
+ else:
+ fields = [field]
+ for field in fields:
+ myzmin = zmin
+ myzmax = zmax
+ if zmin == 'min':
+ myzmin = self.plots[field].image._A.min()
+ if zmax == 'max':
+ myzmax = self.plots[field].image._A.max()
+ if dynamic_range is not None:
+ if zmax is None:
+ myzmax = myzmin * dynamic_range
+ else:
+ myzmin = myzmax / dynamic_range
+
+ self.plots[field].zmin = myzmin
+ self.plots[field].zmax = myzmax
def setup_callbacks(self):
for key in callback_registry:
@@ -512,7 +553,7 @@
callback = invalidate_plot(apply_callback(CallbackMaker))
callback.__doc__ = CallbackMaker.__init__.__doc__
self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
-
+
def get_metadata(self, field, strip_mathml = True, return_string = True):
fval = self._frb[field]
mi = fval.min()
@@ -651,25 +692,32 @@
@invalidate_plot
def set_cmap(self, field, cmap):
"""set the colormap for one of the fields
-
+
Parameters
----------
field : string
the field to set a transform
+ if field == 'all', applies to all plots.
cmap_name : string
name of the colormap
"""
- self._colorbar_valid = False
- self._colormaps[field] = cmap
- if isinstance(cmap, types.StringTypes):
- if str(cmap) in yt_colormaps:
- cmap = yt_colormaps[str(cmap)]
- elif hasattr(matplotlib.cm, cmap):
- cmap = getattr(matplotlib.cm, cmap)
- if not is_colormap(cmap) and cmap is not None:
- raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
- self.plots[field].image.set_cmap(cmap)
+ if field == 'all':
+ fields = self.plots.keys()
+ else:
+ fields = [field]
+
+ for field in fields:
+ self._colorbar_valid = False
+ self._colormaps[field] = cmap
+ if isinstance(cmap, types.StringTypes):
+ if str(cmap) in yt_colormaps:
+ cmap = yt_colormaps[str(cmap)]
+ elif hasattr(matplotlib.cm, cmap):
+ cmap = getattr(matplotlib.cm, cmap)
+ if not is_colormap(cmap) and cmap is not None:
+ raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
+ self.plots[field].image.set_cmap(cmap)
def save(self,name=None):
"""saves the plot to disk.
@@ -762,7 +810,7 @@
the image centers on the location of the maximum density
cell. If set to 'c' or 'center', the plot is centered on
the middle of the domain.
- width : tuple or a float.
+ width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
@@ -781,7 +829,7 @@
the y axis. In the other two examples, code units are assumed, for example
(0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3
in code units.
- origin : string
+ origin : string
The location of the origin of the plot coordinate system.
Currently, can be set to three options: 'left-domain', corresponding
to the bottom-left hand corner of the simulation domain, 'center-domain',
@@ -830,7 +878,7 @@
the image centers on the location of the maximum density
cell. If set to 'c' or 'center', the plot is centered on
the middle of the domain.
- width : tuple or a float.
+ width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
https://bitbucket.org/yt_analysis/yt-3.0/changeset/e5a91d56225c/
changeset: e5a91d56225c
branch: yt
user: jwise77
date: 2012-08-10 15:32:35
summary: Adding the option to use find_outputs with cosmology_splice and the
simulation time series for simulations that have dt_output or
dcycle_output varying over the simulation.
affected #: 4 files
diff -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac -r e5a91d56225c2b5e4605a0a87c1f0ca689eb07e1 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -37,10 +37,11 @@
cosmological distance.
"""
- def __init__(self, parameter_filename, simulation_type):
+ def __init__(self, parameter_filename, simulation_type, find_outputs=False):
self.parameter_filename = parameter_filename
self.simulation_type = simulation_type
- self.simulation = simulation(parameter_filename, simulation_type)
+ self.simulation = simulation(parameter_filename, simulation_type,
+ find_outputs=find_outputs)
self.cosmology = Cosmology(
HubbleConstantNow=(100.0 * self.simulation.hubble_constant),
diff -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac -r e5a91d56225c2b5e4605a0a87c1f0ca689eb07e1 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -112,7 +112,7 @@
f.close()
return proj
-def simulation(parameter_filename, simulation_type):
+def simulation(parameter_filename, simulation_type, find_outputs=False):
"""
Loads a simulation time series object of the specified
simulation type.
@@ -121,5 +121,6 @@
if simulation_type not in simulation_time_series_registry:
raise YTSimulationNotIdentified(simulation_type)
- return simulation_time_series_registry[simulation_type](parameter_filename)
+ return simulation_time_series_registry[simulation_type](parameter_filename,
+ find_outputs=find_outputs)
diff -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac -r e5a91d56225c2b5e4605a0a87c1f0ca689eb07e1 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -320,7 +320,7 @@
simulation_time_series_registry[code_name] = cls
mylog.debug("Registering simulation: %s as %s", code_name, cls)
- def __init__(self, parameter_filename):
+ def __init__(self, parameter_filename, find_outputs=False):
"""
Base class for generating simulation time series types.
Principally consists of a *parameter_filename*.
@@ -345,7 +345,7 @@
self.print_key_parameters()
# Get all possible datasets.
- self._get_all_outputs()
+ self._get_all_outputs(find_outputs)
def __repr__(self):
return self.parameter_filename
diff -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac -r e5a91d56225c2b5e4605a0a87c1f0ca689eb07e1 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -48,7 +48,7 @@
r"""Class for creating TimeSeriesData object from an Enzo
simulation parameter file.
"""
- def __init__(self, parameter_filename):
+ def __init__(self, parameter_filename, find_outputs=False):
r"""Initialize an Enzo Simulation object.
Upon creation, the parameter file is parsed and the time and redshift
@@ -67,7 +67,7 @@
>>> print es.all_outputs
"""
- SimulationTimeSeries.__init__(self, parameter_filename)
+ SimulationTimeSeries.__init__(self, parameter_filename, find_outputs=find_outputs)
def get_time_series(self, time_data=True, redshift_data=True,
initial_time=None, final_time=None, time_units='1',
@@ -401,11 +401,12 @@
self.all_time_outputs.append(output)
index += 1
- def _get_all_outputs(self):
+ def _get_all_outputs(self, find_outputs=False):
"Get all potential datasets and combine into a time-sorted list."
- if self.parameters['dtDataDump'] > 0 and \
- self.parameters['CycleSkipDataDump'] > 0:
+ if find_outputs or \
+ (self.parameters['dtDataDump'] > 0 and \
+ self.parameters['CycleSkipDataDump'] > 0):
mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set." % self.parameter_filename )
mylog.info(" Unable to calculate datasets. Attempting to search in the current directory")
self.all_time_outputs = self._find_outputs()
https://bitbucket.org/yt_analysis/yt-3.0/changeset/1756e6a8c5a4/
changeset: 1756e6a8c5a4
branch: yt
user: jwise77
date: 2012-08-10 15:33:40
summary: Backing out these derived quantities because they were a convenience
and are possible with other quantities.
affected #: 1 file
diff -r e5a91d56225c2b5e4605a0a87c1f0ca689eb07e1 -r 1756e6a8c5a4954d4d92eda9df577ddf55a5ee90 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -156,32 +156,7 @@
def _combTotalMass(data, total_mass):
return total_mass.sum()
add_quantity("TotalMass", function=_TotalMass,
- combine_function=_combTotalMass, n_ret = 1)
-
-def _TotalGasMass(data):
- """
- This function takes no arguments and returns the sum of cell
- masses in the object.
- """
- baryon_mass = data["CellMassMsun"].sum()
- return [baryon_mass]
-def _combTotalGasMass(data, baryon_mass):
- return baryon_mass.sum()
-add_quantity("TotalGasMass", function=_TotalGasMass,
- combine_function=_combTotalGasMass, n_ret = 1)
-
-def _MatterMass(data):
- """
- This function takes no arguments and returns the array sum of cell masses
- and particle masses.
- """
- cellvol = data["CellVolume"]
- matter_rho = data["Matter_Density"]
- return cellvol, matter_rho
-def _combMatterMass(data, cellvol, matter_rho):
- return cellvol*matter_rho
-add_quantity("MatterMass", function=_MatterMass,
- combine_function=_combMatterMass, n_ret=2)
+ combine_function=_combTotalMass, n_ret=1)
def _CenterOfMass(data, use_cells=True, use_particles=False):
"""
https://bitbucket.org/yt_analysis/yt-3.0/changeset/4d131b55bd0b/
changeset: 4d131b55bd0b
branch: yt
user: jwise77
date: 2012-08-10 16:33:37
summary: Linking find_outputs to light_cone and light_ray. Cleaning up a bit
by storing find_outputs into SimulationTimeSeries.
affected #: 4 files
diff -r 1756e6a8c5a4954d4d92eda9df577ddf55a5ee90 -r 4d131b55bd0bd58cb6fdb7cbcdcd73eba0a60f0e yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -57,7 +57,7 @@
use_minimum_datasets=True, deltaz_min=0.0,
minimum_coherent_box_fraction=0.0,
time_data=True, redshift_data=True,
- set_parameters=None,
+ find_outputs=False, set_parameters=None,
output_dir='LC', output_prefix='LightCone'):
"""
Initialize a LightCone object.
@@ -102,6 +102,10 @@
Whether or not to include redshift outputs when gathering
datasets for time series.
Default: True.
+ find_outputs : bool
+ Whether or not to search for parameter files in the current
+ directory.
+ Default: False.
set_parameters : dict
Dictionary of parameters to attach to pf.parameters.
Default: None.
@@ -150,7 +154,8 @@
only_on_root(os.mkdir, self.output_dir)
# Calculate light cone solution.
- CosmologySplice.__init__(self, parameter_filename, simulation_type)
+ CosmologySplice.__init__(self, parameter_filename, simulation_type,
+ find_outputs=find_outputs)
self.light_cone_solution = \
self.create_cosmology_splice(self.near_redshift, self.far_redshift,
minimal=self.use_minimum_datasets,
diff -r 1756e6a8c5a4954d4d92eda9df577ddf55a5ee90 -r 4d131b55bd0bd58cb6fdb7cbcdcd73eba0a60f0e yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -44,7 +44,8 @@
near_redshift, far_redshift,
use_minimum_datasets=True, deltaz_min=0.0,
minimum_coherent_box_fraction=0.0,
- time_data=True, redshift_data=True):
+ time_data=True, redshift_data=True,
+ find_outputs=False):
"""
Create a LightRay object. A light ray is much like a light cone,
in that it stacks together multiple datasets in order to extend a
@@ -93,6 +94,10 @@
Whether or not to include redshift outputs when gathering
datasets for time series.
Default: True.
+ find_outputs : bool
+ Whether or not to search for parameter files in the current
+ directory.
+ Default: False.
"""
@@ -106,7 +111,8 @@
self._data = {}
# Get list of datasets for light ray solution.
- CosmologySplice.__init__(self, parameter_filename, simulation_type)
+ CosmologySplice.__init__(self, parameter_filename, simulation_type,
+ find_outputs=find_outputs)
self.light_ray_solution = \
self.create_cosmology_splice(self.near_redshift, self.far_redshift,
minimal=self.use_minimum_datasets,
diff -r 1756e6a8c5a4954d4d92eda9df577ddf55a5ee90 -r 4d131b55bd0bd58cb6fdb7cbcdcd73eba0a60f0e yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -331,6 +331,7 @@
self.parameter_filename = parameter_filename
self.basename = os.path.basename(parameter_filename)
self.directory = os.path.dirname(parameter_filename)
+ self.find_outputs = find_outputs
self.parameters = {}
# Set some parameter defaults.
@@ -345,7 +346,7 @@
self.print_key_parameters()
# Get all possible datasets.
- self._get_all_outputs(find_outputs)
+ self._get_all_outputs()
def __repr__(self):
return self.parameter_filename
diff -r 1756e6a8c5a4954d4d92eda9df577ddf55a5ee90 -r 4d131b55bd0bd58cb6fdb7cbcdcd73eba0a60f0e yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -67,7 +67,8 @@
>>> print es.all_outputs
"""
- SimulationTimeSeries.__init__(self, parameter_filename, find_outputs=find_outputs)
+ SimulationTimeSeries.__init__(self, parameter_filename,
+ find_outputs=find_outputs)
def get_time_series(self, time_data=True, redshift_data=True,
initial_time=None, final_time=None, time_units='1',
@@ -401,10 +402,10 @@
self.all_time_outputs.append(output)
index += 1
- def _get_all_outputs(self, find_outputs=False):
+ def _get_all_outputs(self):
"Get all potential datasets and combine into a time-sorted list."
- if find_outputs or \
+ if self.find_outputs or \
(self.parameters['dtDataDump'] > 0 and \
self.parameters['CycleSkipDataDump'] > 0):
mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set." % self.parameter_filename )
https://bitbucket.org/yt_analysis/yt-3.0/changeset/13fc27172523/
changeset: 13fc27172523
branch: yt
user: jwise77
date: 2012-08-10 17:26:26
summary: Changing default label of HEALpixCamera to "Projected <field>". The
users can still specify a label.
affected #: 1 file
diff -r 4d131b55bd0bd58cb6fdb7cbcdcd73eba0a60f0e -r 13fc2717252356e55ca6106ec58322ead8cf537f yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -788,7 +788,7 @@
return image
def snapshot(self, fn = None, clip_ratio = None, double_check = False,
- num_threads = 0, clim = None):
+ num_threads = 0, clim = None, label = None):
r"""Ray-cast the camera.
This method instructs the camera to take a snapshot -- i.e., call the ray
@@ -815,10 +815,10 @@
sampler = self.get_sampler(args)
self.volume.initialize_source()
image = self._render(double_check, num_threads, image, sampler)
- self.save_image(fn, clim, image)
+ self.save_image(fn, clim, image, label = label)
return image
- def save_image(self, fn, clim, image, label=None):
+ def save_image(self, fn, clim, image, label = None):
if self.comm.rank is 0 and fn is not None:
# This assumes Density; this is a relatively safe assumption.
import matplotlib.figure
@@ -834,9 +834,9 @@
cb = fig.colorbar(implot, orientation='horizontal')
if label == None:
- cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+ cb.set_label("Projected %s" % self.fields[0])
else:
- cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[%s]$" % units)
+ cb.set_label(label)
if clim is not None: cb.set_clim(*clim)
ax.xaxis.set_ticks(())
ax.yaxis.set_ticks(())
https://bitbucket.org/yt_analysis/yt-3.0/changeset/fa9b83bd9358/
changeset: fa9b83bd9358
branch: yt
user: brittonsmith
date: 2012-08-12 15:45:57
summary: Moved find_outputs keyword to SimulationTimeSeries.__init__ to allow
further selection of outputs in get_time_series. This will also make
it accessible to the light cone and light ray.
affected #: 2 files
diff -r 13fc2717252356e55ca6106ec58322ead8cf537f -r fa9b83bd93588c21d8708cd115ef032e88a9471d yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -331,7 +331,6 @@
self.parameter_filename = parameter_filename
self.basename = os.path.basename(parameter_filename)
self.directory = os.path.dirname(parameter_filename)
- self.find_outputs = find_outputs
self.parameters = {}
# Set some parameter defaults.
@@ -346,7 +345,7 @@
self.print_key_parameters()
# Get all possible datasets.
- self._get_all_outputs()
+ self._get_all_outputs(find_outputs=find_outputs)
def __repr__(self):
return self.parameter_filename
diff -r 13fc2717252356e55ca6106ec58322ead8cf537f -r fa9b83bd93588c21d8708cd115ef032e88a9471d yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -59,6 +59,14 @@
parameter_filename : str
The simulation parameter file.
+ find_outputs : bool
+ If True, subdirectories within the GlobalDir directory are
+ searched one by one for datasets. Time and redshift
+ information are gathered by temporarily instantiating each
+ dataset. This can be used when simulation data was created
+ in a non-standard way, making it difficult to guess the
+ corresponding time and redshift information.
+ Default: False.
Examples
--------
@@ -67,7 +75,7 @@
>>> print es.all_outputs
"""
- SimulationTimeSeries.__init__(self, parameter_filename,
+ SimulationTimeSeries.__init__(self, parameter_filename,
find_outputs=find_outputs)
def get_time_series(self, time_data=True, redshift_data=True,
@@ -75,7 +83,7 @@
initial_redshift=None, final_redshift=None,
initial_cycle=None, final_cycle=None,
times=None, redshifts=None, tolerance=None,
- find_outputs=False, parallel=True):
+ parallel=True):
"""
Instantiate a TimeSeriesData object for a set of outputs.
@@ -146,14 +154,6 @@
given the requested times or redshifts. If None, the
nearest output is always taken.
Default: None.
- find_outputs : bool
- If True, subdirectories within the GlobalDir directory are
- searched one by one for datasets. Time and redshift
- information are gathered by temporarily instantiating each
- dataset. This can be used when simulation data was created
- in a non-standard way, making it difficult to guess the
- corresponding time and redshift information.
- Default: False.
parallel : bool/int
If True, the generated TimeSeriesData will divide the work
such that a single processor works on each dataset. If an
@@ -186,20 +186,15 @@
mylog.error('An initial or final redshift has been given for a noncosmological simulation.')
return
- # Create the set of outputs from which further selection will be done.
- if find_outputs:
- my_all_outputs = self._find_outputs()
-
+ if time_data and redshift_data:
+ my_all_outputs = self.all_outputs
+ elif time_data:
+ my_all_outputs = self.all_time_outputs
+ elif redshift_data:
+ my_all_outputs = self.all_redshift_outputs
else:
- if time_data and redshift_data:
- my_all_outputs = self.all_outputs
- elif time_data:
- my_all_outputs = self.all_time_outputs
- elif redshift_data:
- my_all_outputs = self.all_redshift_outputs
- else:
- mylog.error('Both time_data and redshift_data are False.')
- return
+ mylog.error('Both time_data and redshift_data are False.')
+ return
# Apply selection criteria to the set.
if times is not None:
@@ -355,6 +350,7 @@
for output in self.all_redshift_outputs:
output['time'] = self.enzo_cosmology.ComputeTimeFromRedshift(output['redshift']) / \
self.enzo_cosmology.TimeUnits
+ self.all_redshift_outputs.sort(key=lambda obj:obj['time'])
def _calculate_time_outputs(self):
"Calculate time outputs and their redshifts if cosmological."
@@ -402,28 +398,32 @@
self.all_time_outputs.append(output)
index += 1
- def _get_all_outputs(self):
+ def _get_all_outputs(self, find_outputs=False):
"Get all potential datasets and combine into a time-sorted list."
- if self.find_outputs or \
- (self.parameters['dtDataDump'] > 0 and \
- self.parameters['CycleSkipDataDump'] > 0):
+ # Create the set of outputs from which further selection will be done.
+ if find_outputs:
+ self._find_outputs()
+
+ elif self.parameters['dtDataDump'] > 0 and \
+ self.parameters['CycleSkipDataDump'] > 0:
mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set." % self.parameter_filename )
mylog.info(" Unable to calculate datasets. Attempting to search in the current directory")
- self.all_time_outputs = self._find_outputs()
+ self._find_outputs()
- # Get all time or cycle outputs.
- elif self.parameters['CycleSkipDataDump'] > 0:
- self._calculate_cycle_outputs()
else:
- self._calculate_time_outputs()
+ # Get all time or cycle outputs.
+ if self.parameters['CycleSkipDataDump'] > 0:
+ self._calculate_cycle_outputs()
+ else:
+ self._calculate_time_outputs()
- # Calculate times for redshift outputs.
- self._calculate_redshift_dump_times()
+ # Calculate times for redshift outputs.
+ self._calculate_redshift_dump_times()
- self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
- if self.parameters['CycleSkipDataDump'] <= 0:
- self.all_outputs.sort(key=lambda obj:obj['time'])
+ self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
+ if self.parameters['CycleSkipDataDump'] <= 0:
+ self.all_outputs.sort(key=lambda obj:obj['time'])
mylog.info("Total datasets: %d." % len(self.all_outputs))
@@ -505,14 +505,32 @@
"""
# look for time outputs.
- potential_outputs = glob.glob(os.path.join(self.parameters['GlobalDir'],
- "%s*" % self.parameters['DataDumpDir'])) + \
- glob.glob(os.path.join(self.parameters['GlobalDir'],
- "%s*" % self.parameters['RedshiftDumpDir']))
- time_outputs = []
- mylog.info("Checking %d potential time outputs." %
+ potential_time_outputs = \
+ glob.glob(os.path.join(self.parameters['GlobalDir'],
+ "%s*" % self.parameters['DataDumpDir']))
+ self.all_time_outputs = \
+ self._check_for_outputs(potential_time_outputs)
+ self.all_time_outputs.sort(key=lambda obj: obj['time'])
+
+ # look for redshift outputs.
+ potential_redshift_outputs = \
+ glob.glob(os.path.join(self.parameters['GlobalDir'],
+ "%s*" % self.parameters['RedshiftDumpDir']))
+ self.all_redshift_outputs = \
+ self._check_for_outputs(potential_redshift_outputs)
+ self.all_redshift_outputs.sort(key=lambda obj: obj['time'])
+
+ self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
+ self.all_outputs.sort(key=lambda obj: obj['time'])
+ mylog.info("Located %d total outputs." % len(self.all_outputs))
+
+ def _check_for_outputs(self, potential_outputs):
+ r"""Check a list of files to see if they are valid datasets."""
+
+ mylog.info("Checking %d potential outputs." %
len(potential_outputs))
+ my_outputs = []
for output in potential_outputs:
if self.parameters['DataDumpDir'] in output:
dir_key = self.parameters['DataDumpDir']
@@ -528,15 +546,14 @@
try:
pf = load(filename)
if pf is not None:
- time_outputs.append({'filename': filename, 'time': pf.current_time})
+ my_outputs.append({'filename': filename,
+ 'time': pf.current_time})
if pf.cosmological_simulation:
- time_outputs[-1]['redshift'] = pf.current_redshift
+ my_outputs[-1]['redshift'] = pf.current_redshift
except YTOutputNotIdentified:
mylog.error('Failed to load %s' % filename)
- mylog.info("Located %d time outputs." % len(time_outputs))
- time_outputs.sort(key=lambda obj: obj['time'])
- return time_outputs
+ return my_outputs
def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):
r"""Get datasets at or near to given values.
https://bitbucket.org/yt_analysis/yt-3.0/changeset/514f5a6a60c8/
changeset: 514f5a6a60c8
branch: yt
user: brittonsmith
date: 2012-08-13 17:11:31
summary: Merged in jwise77/yt (pull request #241)
affected #: 11 files
diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -37,10 +37,11 @@
cosmological distance.
"""
- def __init__(self, parameter_filename, simulation_type):
+ def __init__(self, parameter_filename, simulation_type, find_outputs=False):
self.parameter_filename = parameter_filename
self.simulation_type = simulation_type
- self.simulation = simulation(parameter_filename, simulation_type)
+ self.simulation = simulation(parameter_filename, simulation_type,
+ find_outputs=find_outputs)
self.cosmology = Cosmology(
HubbleConstantNow=(100.0 * self.simulation.hubble_constant),
diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -57,7 +57,7 @@
use_minimum_datasets=True, deltaz_min=0.0,
minimum_coherent_box_fraction=0.0,
time_data=True, redshift_data=True,
- set_parameters=None,
+ find_outputs=False, set_parameters=None,
output_dir='LC', output_prefix='LightCone'):
"""
Initialize a LightCone object.
@@ -102,6 +102,10 @@
Whether or not to include redshift outputs when gathering
datasets for time series.
Default: True.
+ find_outputs : bool
+ Whether or not to search for parameter files in the current
+ directory.
+ Default: False.
set_parameters : dict
Dictionary of parameters to attach to pf.parameters.
Default: None.
@@ -150,7 +154,8 @@
only_on_root(os.mkdir, self.output_dir)
# Calculate light cone solution.
- CosmologySplice.__init__(self, parameter_filename, simulation_type)
+ CosmologySplice.__init__(self, parameter_filename, simulation_type,
+ find_outputs=find_outputs)
self.light_cone_solution = \
self.create_cosmology_splice(self.near_redshift, self.far_redshift,
minimal=self.use_minimum_datasets,
diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -44,7 +44,8 @@
near_redshift, far_redshift,
use_minimum_datasets=True, deltaz_min=0.0,
minimum_coherent_box_fraction=0.0,
- time_data=True, redshift_data=True):
+ time_data=True, redshift_data=True,
+ find_outputs=False):
"""
Create a LightRay object. A light ray is much like a light cone,
in that it stacks together multiple datasets in order to extend a
@@ -93,6 +94,10 @@
Whether or not to include redshift outputs when gathering
datasets for time series.
Default: True.
+ find_outputs : bool
+ Whether or not to search for parameter files in the current
+ directory.
+ Default: False.
"""
@@ -106,7 +111,8 @@
self._data = {}
# Get list of datasets for light ray solution.
- CosmologySplice.__init__(self, parameter_filename, simulation_type)
+ CosmologySplice.__init__(self, parameter_filename, simulation_type,
+ find_outputs=find_outputs)
self.light_ray_solution = \
self.create_cosmology_splice(self.near_redshift, self.far_redshift,
minimal=self.use_minimum_datasets,
diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -393,6 +393,7 @@
dt = na.maximum(dt, 0.0)
# Remove young stars
sub = dt >= self.min_age
+ if len(sub) == 0: return
self.star_metal = self.star_metal[sub]
dt = dt[sub]
self.star_creation_time = self.star_creation_time[sub]
diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -112,7 +112,7 @@
f.close()
return proj
-def simulation(parameter_filename, simulation_type):
+def simulation(parameter_filename, simulation_type, find_outputs=False):
"""
Loads a simulation time series object of the specified
simulation type.
@@ -121,5 +121,6 @@
if simulation_type not in simulation_time_series_registry:
raise YTSimulationNotIdentified(simulation_type)
- return simulation_time_series_registry[simulation_type](parameter_filename)
+ return simulation_time_series_registry[simulation_type](parameter_filename,
+ find_outputs=find_outputs)
diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -320,7 +320,7 @@
simulation_time_series_registry[code_name] = cls
mylog.debug("Registering simulation: %s as %s", code_name, cls)
- def __init__(self, parameter_filename):
+ def __init__(self, parameter_filename, find_outputs=False):
"""
Base class for generating simulation time series types.
Principally consists of a *parameter_filename*.
@@ -345,7 +345,7 @@
self.print_key_parameters()
# Get all possible datasets.
- self._get_all_outputs()
+ self._get_all_outputs(find_outputs=find_outputs)
def __repr__(self):
return self.parameter_filename
diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -388,6 +388,36 @@
function=_CellVolume,
convert_function=_ConvertCellVolumeCGS)
+def _ChandraEmissivity(field, data):
+ logT0 = na.log10(data["Temperature"]) - 7
+ return ((data["NumberDensity"].astype('float64')**2.0) \
+ *(10**(-0.0103*logT0**8 \
+ +0.0417*logT0**7 \
+ -0.0636*logT0**6 \
+ +0.1149*logT0**5 \
+ -0.3151*logT0**4 \
+ +0.6655*logT0**3 \
+ -1.1256*logT0**2 \
+ +1.0026*logT0**1 \
+ -0.6984*logT0) \
+ +data["Metallicity"]*10**(0.0305*logT0**11 \
+ -0.0045*logT0**10 \
+ -0.3620*logT0**9 \
+ +0.0513*logT0**8 \
+ +1.6669*logT0**7 \
+ -0.3854*logT0**6 \
+ -3.3604*logT0**5 \
+ +0.4728*logT0**4 \
+ +4.5774*logT0**3 \
+ -2.3661*logT0**2 \
+ -1.6667*logT0**1 \
+ -0.2193*logT0)))
+def _convertChandraEmissivity(data):
+ return 1.0 #1.0e-23*0.76**2
+add_field("ChandraEmissivity", function=_ChandraEmissivity,
+ convert_function=_convertChandraEmissivity,
+ projection_conversion="1")
+
def _XRayEmissivity(field, data):
return ((data["Density"].astype('float64')**2.0) \
*data["Temperature"]**0.5)
diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -171,16 +171,16 @@
# We set up fields for both TotalEnergy and Total_Energy in the known fields
# lists. Note that this does not mean these will be the used definitions.
add_enzo_field("TotalEnergy", function=NullFunc,
- display_name = "\mathrm{Total}\/\mathrm{Energy}",
+ display_name = "\rm{Total}\/\rm{Energy}",
units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
add_enzo_field("Total_Energy", function=NullFunc,
- display_name = "\mathrm{Total}\/\mathrm{Energy}",
+ display_name = "\rm{Total}\/\rm{Energy}",
units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
def _Total_Energy(field, data):
return data["TotalEnergy"] / _convertEnergy(data)
add_field("Total_Energy", function=_Total_Energy,
- display_name = "\mathrm{Total}\/\mathrm{Energy}",
+ display_name = "\rm{Total}\/\rm{Energy}",
units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
def _NumberDensity(field, data):
@@ -247,7 +247,7 @@
for field in ['Bx','By','Bz']:
f = KnownEnzoFields[field]
f._convert_function=_convertBfield
- f._units=r"\mathrm{Gau\ss}"
+ f._units=r"\rm{Gauss}"
f.take_log=False
def _convertRadiation(data):
@@ -447,14 +447,14 @@
return data['star_creation_time']
def _ConvertEnzoTimeYears(data):
return data.pf.time_units['years']
-add_field('StarCreationTimeYears', units=r"\mathrm{yr}",
+add_field('StarCreationTimeYears', units=r"\rm{yr}",
function=_StarCreationTime,
convert_function=_ConvertEnzoTimeYears,
projection_conversion="1")
def _StarDynamicalTime(field, data):
return data['star_dynamical_time']
-add_field('StarDynamicalTimeYears', units=r"\mathrm{yr}",
+add_field('StarDynamicalTimeYears', units=r"\rm{yr}",
function=_StarDynamicalTime,
convert_function=_ConvertEnzoTimeYears,
projection_conversion="1")
@@ -466,7 +466,7 @@
data.pf.current_time - \
data['StarCreationTimeYears'][with_stars]
return star_age
-add_field('StarAgeYears', units=r"\mathrm{yr}",
+add_field('StarAgeYears', units=r"\rm{yr}",
function=_StarAge,
projection_conversion="1")
@@ -476,20 +476,12 @@
add_field('IsStarParticle', function=_IsStarParticle,
particle_type = True)
-def _convertBfield(data):
- return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
-for field in ['Bx','By','Bz']:
- f = KnownEnzoFields[field]
- f._convert_function=_convertBfield
- f._units=r"\mathrm{Gauss}"
- f.take_log=False
-
def _Bmag(field, data):
""" magnitude of bvec
"""
return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\mathrm{Gauss}")
+add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\rm{Gauss}")
# Particle functions
@@ -645,17 +637,3 @@
add_enzo_1d_field("z-velocity", function=_zvel)
add_enzo_1d_field("y-velocity", function=_yvel)
-def _convertBfield(data):
- return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
-for field in ['Bx','By','Bz']:
- f = KnownEnzoFields[field]
- f._convert_function=_convertBfield
- f._units=r"\mathrm{Gauss}"
- f.take_log=False
-
-def _Bmag(field, data):
- """ magnitude of bvec
- """
- return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
-
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\mathrm{Gauss}")
diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -48,7 +48,7 @@
r"""Class for creating TimeSeriesData object from an Enzo
simulation parameter file.
"""
- def __init__(self, parameter_filename):
+ def __init__(self, parameter_filename, find_outputs=False):
r"""Initialize an Enzo Simulation object.
Upon creation, the parameter file is parsed and the time and redshift
@@ -59,6 +59,14 @@
parameter_filename : str
The simulation parameter file.
+ find_outputs : bool
+ If True, subdirectories within the GlobalDir directory are
+ searched one by one for datasets. Time and redshift
+ information are gathered by temporarily instantiating each
+ dataset. This can be used when simulation data was created
+ in a non-standard way, making it difficult to guess the
+ corresponding time and redshift information.
+ Default: False.
Examples
--------
@@ -67,14 +75,15 @@
>>> print es.all_outputs
"""
- SimulationTimeSeries.__init__(self, parameter_filename)
+ SimulationTimeSeries.__init__(self, parameter_filename,
+ find_outputs=find_outputs)
def get_time_series(self, time_data=True, redshift_data=True,
initial_time=None, final_time=None, time_units='1',
initial_redshift=None, final_redshift=None,
initial_cycle=None, final_cycle=None,
times=None, redshifts=None, tolerance=None,
- find_outputs=False, parallel=True):
+ parallel=True):
"""
Instantiate a TimeSeriesData object for a set of outputs.
@@ -145,14 +154,6 @@
given the requested times or redshifts. If None, the
nearest output is always taken.
Default: None.
- find_outputs : bool
- If True, subdirectories within the GlobalDir directory are
- searched one by one for datasets. Time and redshift
- information are gathered by temporarily instantiating each
- dataset. This can be used when simulation data was created
- in a non-standard way, making it difficult to guess the
- corresponding time and redshift information.
- Default: False.
parallel : bool/int
If True, the generated TimeSeriesData will divide the work
such that a single processor works on each dataset. If an
@@ -185,20 +186,15 @@
mylog.error('An initial or final redshift has been given for a noncosmological simulation.')
return
- # Create the set of outputs from which further selection will be done.
- if find_outputs:
- my_all_outputs = self._find_outputs()
-
+ if time_data and redshift_data:
+ my_all_outputs = self.all_outputs
+ elif time_data:
+ my_all_outputs = self.all_time_outputs
+ elif redshift_data:
+ my_all_outputs = self.all_redshift_outputs
else:
- if time_data and redshift_data:
- my_all_outputs = self.all_outputs
- elif time_data:
- my_all_outputs = self.all_time_outputs
- elif redshift_data:
- my_all_outputs = self.all_redshift_outputs
- else:
- mylog.error('Both time_data and redshift_data are False.')
- return
+ mylog.error('Both time_data and redshift_data are False.')
+ return
# Apply selection criteria to the set.
if times is not None:
@@ -354,6 +350,7 @@
for output in self.all_redshift_outputs:
output['time'] = self.enzo_cosmology.ComputeTimeFromRedshift(output['redshift']) / \
self.enzo_cosmology.TimeUnits
+ self.all_redshift_outputs.sort(key=lambda obj:obj['time'])
def _calculate_time_outputs(self):
"Calculate time outputs and their redshifts if cosmological."
@@ -401,27 +398,32 @@
self.all_time_outputs.append(output)
index += 1
- def _get_all_outputs(self):
+ def _get_all_outputs(self, find_outputs=False):
"Get all potential datasets and combine into a time-sorted list."
- if self.parameters['dtDataDump'] > 0 and \
- self.parameters['CycleSkipDataDump'] > 0:
+ # Create the set of outputs from which further selection will be done.
+ if find_outputs:
+ self._find_outputs()
+
+ elif self.parameters['dtDataDump'] > 0 and \
+ self.parameters['CycleSkipDataDump'] > 0:
mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set." % self.parameter_filename )
mylog.info(" Unable to calculate datasets. Attempting to search in the current directory")
- self.all_time_outputs = self._find_outputs()
+ self._find_outputs()
- # Get all time or cycle outputs.
- elif self.parameters['CycleSkipDataDump'] > 0:
- self._calculate_cycle_outputs()
else:
- self._calculate_time_outputs()
+ # Get all time or cycle outputs.
+ if self.parameters['CycleSkipDataDump'] > 0:
+ self._calculate_cycle_outputs()
+ else:
+ self._calculate_time_outputs()
- # Calculate times for redshift outputs.
- self._calculate_redshift_dump_times()
+ # Calculate times for redshift outputs.
+ self._calculate_redshift_dump_times()
- self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
- if self.parameters['CycleSkipDataDump'] <= 0:
- self.all_outputs.sort(key=lambda obj:obj['time'])
+ self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
+ if self.parameters['CycleSkipDataDump'] <= 0:
+ self.all_outputs.sort(key=lambda obj:obj['time'])
mylog.info("Total datasets: %d." % len(self.all_outputs))
@@ -503,14 +505,32 @@
"""
# look for time outputs.
- potential_outputs = glob.glob(os.path.join(self.parameters['GlobalDir'],
- "%s*" % self.parameters['DataDumpDir'])) + \
- glob.glob(os.path.join(self.parameters['GlobalDir'],
- "%s*" % self.parameters['RedshiftDumpDir']))
- time_outputs = []
- mylog.info("Checking %d potential time outputs." %
+ potential_time_outputs = \
+ glob.glob(os.path.join(self.parameters['GlobalDir'],
+ "%s*" % self.parameters['DataDumpDir']))
+ self.all_time_outputs = \
+ self._check_for_outputs(potential_time_outputs)
+ self.all_time_outputs.sort(key=lambda obj: obj['time'])
+
+ # look for redshift outputs.
+ potential_redshift_outputs = \
+ glob.glob(os.path.join(self.parameters['GlobalDir'],
+ "%s*" % self.parameters['RedshiftDumpDir']))
+ self.all_redshift_outputs = \
+ self._check_for_outputs(potential_redshift_outputs)
+ self.all_redshift_outputs.sort(key=lambda obj: obj['time'])
+
+ self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
+ self.all_outputs.sort(key=lambda obj: obj['time'])
+ mylog.info("Located %d total outputs." % len(self.all_outputs))
+
+ def _check_for_outputs(self, potential_outputs):
+ r"""Check a list of files to see if they are valid datasets."""
+
+ mylog.info("Checking %d potential outputs." %
len(potential_outputs))
+ my_outputs = []
for output in potential_outputs:
if self.parameters['DataDumpDir'] in output:
dir_key = self.parameters['DataDumpDir']
@@ -526,15 +546,14 @@
try:
pf = load(filename)
if pf is not None:
- time_outputs.append({'filename': filename, 'time': pf.current_time})
+ my_outputs.append({'filename': filename,
+ 'time': pf.current_time})
if pf.cosmological_simulation:
- time_outputs[-1]['redshift'] = pf.current_redshift
+ my_outputs[-1]['redshift'] = pf.current_redshift
except YTOutputNotIdentified:
mylog.error('Failed to load %s' % filename)
- mylog.info("Located %d time outputs." % len(time_outputs))
- time_outputs.sort(key=lambda obj: obj['time'])
- return time_outputs
+ return my_outputs
def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):
r"""Get datasets at or near to given values.
diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -788,7 +788,7 @@
return image
def snapshot(self, fn = None, clip_ratio = None, double_check = False,
- num_threads = 0, clim = None):
+ num_threads = 0, clim = None, label = None):
r"""Ray-cast the camera.
This method instructs the camera to take a snapshot -- i.e., call the ray
@@ -815,10 +815,10 @@
sampler = self.get_sampler(args)
self.volume.initialize_source()
image = self._render(double_check, num_threads, image, sampler)
- self.save_image(fn, clim, image)
+ self.save_image(fn, clim, image, label = label)
return image
- def save_image(self, fn, clim, image):
+ def save_image(self, fn, clim, image, label = None):
if self.comm.rank is 0 and fn is not None:
# This assumes Density; this is a relatively safe assumption.
import matplotlib.figure
@@ -832,7 +832,11 @@
ax = fig.add_subplot(1,1,1,projection='hammer')
implot = ax.imshow(img, extent=(-na.pi,na.pi,-na.pi/2,na.pi/2), clip_on=False, aspect=0.5)
cb = fig.colorbar(implot, orientation='horizontal')
- cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+
+ if label == None:
+ cb.set_label("Projected %s" % self.fields[0])
+ else:
+ cb.set_label(label)
if clim is not None: cb.set_clim(*clim)
ax.xaxis.set_ticks(())
ax.yaxis.set_ticks(())
https://bitbucket.org/yt_analysis/yt-3.0/changeset/53fbc2fa2d1b/
changeset: 53fbc2fa2d1b
branch: yt
user: ngoldbaum
date: 2012-08-16 02:06:19
summary: Updating the install script to point directly to bitbucket. Fixing an
error that caused some debug output to not be redirected to the
install log.
affected #: 1 file
diff -r 514f5a6a60c893b2a05f929150f352b153d68c65 -r 53fbc2fa2d1b7cf40dd1f01985833e7822ee04c1 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -599,11 +599,11 @@
elif [ ! -e yt-hg ]
then
YT_DIR="$PWD/yt-hg/"
- ( ${HG_EXEC} --debug clone http://hg.yt-project.org/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
+ ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
# Recently the hg server has had some issues with timeouts. In lieu of
# a new webserver, we are now moving to a three-stage process.
# First we clone the repo, but only up to r0.
- ( ${HG_EXEC} --debug clone http://hg.yt-project.org/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
+ ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
# Now we update to the branch we're interested in.
( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
elif [ -e yt-hg ]
@@ -682,7 +682,7 @@
echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
MY_PWD=`pwd`
cd $YT_DIR
-( ${HG_EXEC} pull && ${HG_EXEC} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
+( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
echo "Installing yt"
echo $HDF5_DIR > hdf5.cfg
https://bitbucket.org/yt_analysis/yt-3.0/changeset/2160220a810d/
changeset: 2160220a810d
branch: stable
user: ngoldbaum
date: 2012-08-16 02:06:19
summary: Updating the install script to point directly to bitbucket. Fixing an
error that caused some debug output to not be redirected to the
install log.
affected #: 1 file
diff -r e6227ef4338b6cc70de051951f2f17bdfb494ba6 -r 2160220a810d53ef32dcbd2b695fdbe626db9e98 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -597,11 +597,11 @@
elif [ ! -e yt-hg ]
then
YT_DIR="$PWD/yt-hg/"
- ( ${HG_EXEC} --debug clone http://hg.yt-project.org/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
+ ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
# Recently the hg server has had some issues with timeouts. In lieu of
# a new webserver, we are now moving to a three-stage process.
# First we clone the repo, but only up to r0.
- ( ${HG_EXEC} --debug clone http://hg.yt-project.org/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
+ ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
# Now we update to the branch we're interested in.
( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
elif [ -e yt-hg ]
@@ -679,7 +679,7 @@
echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
MY_PWD=`pwd`
cd $YT_DIR
-( ${HG_EXEC} pull && ${HG_EXEC} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
+( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
echo "Installing yt"
echo $HDF5_DIR > hdf5.cfg
https://bitbucket.org/yt_analysis/yt-3.0/changeset/7c5ad85490e8/
changeset: 7c5ad85490e8
branch: yt
user: xarthisius
date: 2012-08-16 13:14:26
summary: [gdf] do not modify cell width for non-existent dimension in 2D data
affected #: 1 file
diff -r 53fbc2fa2d1b7cf40dd1f01985833e7822ee04c1 -r 7c5ad85490e8ade384a165f1af51e1ef7cd9f692 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -8,7 +8,7 @@
Affiliation: KIPAC/SLAC/Stanford
Homepage: http://yt-project.org/
License:
- Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.
+ Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.
All Rights Reserved.
This file is part of yt.
@@ -79,7 +79,7 @@
class GDFHierarchy(AMRHierarchy):
grid = GDFGrid
-
+
def __init__(self, pf, data_style='grid_data_format'):
self.parameter_file = weakref.proxy(pf)
self.data_style = data_style
@@ -96,7 +96,7 @@
def _detect_fields(self):
self.field_list = self._fhandle['field_types'].keys()
-
+
def _setup_classes(self):
dd = self._get_data_reader_dict()
AMRHierarchy._setup_classes(self, dd)
@@ -104,14 +104,17 @@
def _count_grids(self):
self.num_grids = self._fhandle['/grid_parent_id'].shape[0]
-
+
def _parse_hierarchy(self):
- f = self._fhandle
- dxs=[]
+ f = self._fhandle
+ dxs = []
self.grids = na.empty(self.num_grids, dtype='object')
levels = (f['grid_level'][:]).copy()
glis = (f['grid_left_index'][:]).copy()
gdims = (f['grid_dimensions'][:]).copy()
+ active_dims = ~((na.max(gdims, axis=0) == 1) &
+ (self.parameter_file.domain_dimensions == 1))
+
for i in range(levels.shape[0]):
self.grids[i] = self.grid(i, self, levels[i],
glis[i],
@@ -120,7 +123,7 @@
dx = (self.parameter_file.domain_right_edge-
self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
- dx = dx/self.parameter_file.refine_by**(levels[i])
+ dx[active_dims] = dx[active_dims]/self.parameter_file.refine_by**(levels[i])
dxs.append(dx)
dx = na.array(dxs)
self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
@@ -128,7 +131,7 @@
self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
self.grid_particle_count = f['grid_particle_count'][:]
del levels, glis, gdims
-
+
def _populate_grid_objects(self):
for g in self.grids:
g._prepare_grid()
@@ -153,13 +156,13 @@
_hierarchy_class = GDFHierarchy
_fieldinfo_fallback = GDFFieldInfo
_fieldinfo_known = KnownGDFFields
-
+
def __init__(self, filename, data_style='grid_data_format',
storage_filename = None):
StaticOutput.__init__(self, filename, data_style)
self.storage_filename = storage_filename
self.filename = filename
-
+
def _set_units(self):
"""
Generates the conversion to various physical _units based on the parameter file
@@ -190,12 +193,12 @@
except:
current_fields_unit = ""
self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
- units=current_fields_unit, projected_units="",
+ units=current_fields_unit, projected_units="",
convert_function=_get_convert(field_name))
self._handle.close()
del self._handle
-
+
def _parse_parameter_file(self):
self._handle = h5py.File(self.parameter_filename, "r")
sp = self._handle["/simulation_parameters"].attrs
@@ -204,7 +207,7 @@
self.domain_dimensions = sp["domain_dimensions"][:]
refine_by = sp["refine_by"]
if refine_by is None: refine_by = 2
- self.refine_by = refine_by
+ self.refine_by = refine_by
self.dimensionality = sp["dimensionality"]
self.current_time = sp["current_time"]
self.unique_identifier = sp["unique_identifier"]
@@ -225,7 +228,7 @@
self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
self._handle.close()
del self._handle
-
+
@classmethod
def _is_valid(self, *args, **kwargs):
try:
@@ -238,4 +241,4 @@
def __repr__(self):
return self.basename.rsplit(".", 1)[0]
-
+
https://bitbucket.org/yt_analysis/yt-3.0/changeset/d6d392ef14aa/
changeset: d6d392ef14aa
branch: yt-3.0
user: MatthewTurk
date: 2012-08-20 21:08:30
summary: Merge
affected #: 24 files
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -201,6 +201,12 @@
echo "$ export CC=gcc-4.2"
echo "$ export CXX=g++-4.2"
echo
+ OSX_VERSION=`sw_vers -productVersion`
+ if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
+ then
+ MPL_SUPP_CFLAGS="${MPL_SUPP_CFLAGS} -mmacosx-version-min=10.7"
+ MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
+ fi
fi
if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
then
@@ -213,10 +219,11 @@
echo " * libncurses5"
echo " * libncurses5-dev"
echo " * zip"
+ echo " * uuid-dev"
echo
echo "You can accomplish this by executing:"
echo
- echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip"
+ echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev"
echo
fi
if [ ! -z "${CFLAGS}" ]
@@ -410,6 +417,7 @@
echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
+echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93 Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
# Individual processes
[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.7.tar.gz
@@ -430,6 +438,7 @@
get_ytproject h5py-2.0.1.tar.gz
get_ytproject Cython-0.16.tar.gz
get_ytproject reason-js-20120623.zip
+get_ytproject Forthon-0.8.10.tar.gz
if [ $INST_BZLIB -eq 1 ]
then
@@ -590,11 +599,11 @@
elif [ ! -e yt-hg ]
then
YT_DIR="$PWD/yt-hg/"
- ( ${HG_EXEC} --debug clone http://hg.yt-project.org/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
+ ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
# Recently the hg server has had some issues with timeouts. In lieu of
# a new webserver, we are now moving to a three-stage process.
# First we clone the repo, but only up to r0.
- ( ${HG_EXEC} --debug clone http://hg.yt-project.org/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
+ ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
# Now we update to the branch we're interested in.
( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
elif [ -e yt-hg ]
@@ -667,12 +676,13 @@
do_setup_py ipython-0.13
do_setup_py h5py-2.0.1
do_setup_py Cython-0.16
+do_setup_py Forthon-0.8.10
[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
MY_PWD=`pwd`
cd $YT_DIR
-( ${HG_EXEC} pull && ${HG_EXEC} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
+( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
echo "Installing yt"
echo $HDF5_DIR > hdf5.cfg
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -37,10 +37,11 @@
cosmological distance.
"""
- def __init__(self, parameter_filename, simulation_type):
+ def __init__(self, parameter_filename, simulation_type, find_outputs=False):
self.parameter_filename = parameter_filename
self.simulation_type = simulation_type
- self.simulation = simulation(parameter_filename, simulation_type)
+ self.simulation = simulation(parameter_filename, simulation_type,
+ find_outputs=find_outputs)
self.cosmology = Cosmology(
HubbleConstantNow=(100.0 * self.simulation.hubble_constant),
@@ -184,7 +185,23 @@
mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." %
(len(cosmology_splice), far_redshift, near_redshift))
-
+
+ # change the 'next' and 'previous' pointers to point to the correct outputs for the created
+ # splice
+ for i, output in enumerate(cosmology_splice):
+ if len(cosmology_splice) == 1:
+ output['previous'] = None
+ output['next'] = None
+ elif i == 0:
+ output['previous'] = None
+ output['next'] = cosmology_splice[i + 1]
+ elif i == len(cosmology_splice) - 1:
+ output['previous'] = cosmology_splice[i - 1]
+ output['next'] = None
+ else:
+ output['previous'] = cosmology_splice[i - 1]
+ output['next'] = cosmology_splice[i + 1]
+
self.splice_outputs.sort(key=lambda obj: obj['time'])
return cosmology_splice
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -57,7 +57,7 @@
use_minimum_datasets=True, deltaz_min=0.0,
minimum_coherent_box_fraction=0.0,
time_data=True, redshift_data=True,
- set_parameters=None,
+ find_outputs=False, set_parameters=None,
output_dir='LC', output_prefix='LightCone'):
"""
Initialize a LightCone object.
@@ -102,6 +102,10 @@
Whether or not to include redshift outputs when gathering
datasets for time series.
Default: True.
+ find_outputs : bool
+ Whether or not to search for parameter files in the current
+ directory.
+ Default: False.
set_parameters : dict
Dictionary of parameters to attach to pf.parameters.
Default: None.
@@ -150,7 +154,8 @@
only_on_root(os.mkdir, self.output_dir)
# Calculate light cone solution.
- CosmologySplice.__init__(self, parameter_filename, simulation_type)
+ CosmologySplice.__init__(self, parameter_filename, simulation_type,
+ find_outputs=find_outputs)
self.light_cone_solution = \
self.create_cosmology_splice(self.near_redshift, self.far_redshift,
minimal=self.use_minimum_datasets,
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -44,7 +44,8 @@
near_redshift, far_redshift,
use_minimum_datasets=True, deltaz_min=0.0,
minimum_coherent_box_fraction=0.0,
- time_data=True, redshift_data=True):
+ time_data=True, redshift_data=True,
+ find_outputs=False):
"""
Create a LightRay object. A light ray is much like a light cone,
in that it stacks together multiple datasets in order to extend a
@@ -93,6 +94,10 @@
Whether or not to include redshift outputs when gathering
datasets for time series.
Default: True.
+ find_outputs : bool
+ Whether or not to search for parameter files in the current
+ directory.
+ Default: False.
"""
@@ -106,7 +111,8 @@
self._data = {}
# Get list of datasets for light ray solution.
- CosmologySplice.__init__(self, parameter_filename, simulation_type)
+ CosmologySplice.__init__(self, parameter_filename, simulation_type,
+ find_outputs=find_outputs)
self.light_ray_solution = \
self.create_cosmology_splice(self.near_redshift, self.far_redshift,
minimal=self.use_minimum_datasets,
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -393,6 +393,7 @@
dt = na.maximum(dt, 0.0)
# Remove young stars
sub = dt >= self.min_age
+ if len(sub) == 0: return
self.star_metal = self.star_metal[sub]
dt = dt[sub]
self.star_creation_time = self.star_creation_time[sub]
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -112,7 +112,7 @@
f.close()
return proj
-def simulation(parameter_filename, simulation_type):
+def simulation(parameter_filename, simulation_type, find_outputs=False):
"""
Loads a simulation time series object of the specified
simulation type.
@@ -121,5 +121,6 @@
if simulation_type not in simulation_time_series_registry:
raise YTSimulationNotIdentified(simulation_type)
- return simulation_time_series_registry[simulation_type](parameter_filename)
+ return simulation_time_series_registry[simulation_type](parameter_filename,
+ find_outputs=find_outputs)
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -548,12 +548,13 @@
Parameters
----------
- width : width specifier
- This can either be a floating point value, in the native domain
- units of the simulation, or a tuple of the (value, unit) style.
- This will be the width of the FRB.
+ height : height specifier
+ This will be the physical height of the FRB, by default it is equal
+ to width. Note that this will not make any corrections to
+ resolution for the aspect ratio.
resolution : int or tuple of ints
- The number of pixels on a side of the final FRB.
+ The number of pixels on a side of the final FRB. If iterable, this
+ will be the width then the height.
height : height specifier
This will be the height of the FRB, by default it is equal to width.
center : array-like of floats, optional
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -320,7 +320,7 @@
simulation_time_series_registry[code_name] = cls
mylog.debug("Registering simulation: %s as %s", code_name, cls)
- def __init__(self, parameter_filename):
+ def __init__(self, parameter_filename, find_outputs=False):
"""
Base class for generating simulation time series types.
Principally consists of a *parameter_filename*.
@@ -345,7 +345,7 @@
self.print_key_parameters()
# Get all possible datasets.
- self._get_all_outputs()
+ self._get_all_outputs(find_outputs=find_outputs)
def __repr__(self):
return self.parameter_filename
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -383,6 +383,36 @@
function=_CellVolume,
convert_function=_ConvertCellVolumeCGS)
+def _ChandraEmissivity(field, data):
+ logT0 = na.log10(data["Temperature"]) - 7
+ return ((data["NumberDensity"].astype('float64')**2.0) \
+ *(10**(-0.0103*logT0**8 \
+ +0.0417*logT0**7 \
+ -0.0636*logT0**6 \
+ +0.1149*logT0**5 \
+ -0.3151*logT0**4 \
+ +0.6655*logT0**3 \
+ -1.1256*logT0**2 \
+ +1.0026*logT0**1 \
+ -0.6984*logT0) \
+ +data["Metallicity"]*10**(0.0305*logT0**11 \
+ -0.0045*logT0**10 \
+ -0.3620*logT0**9 \
+ +0.0513*logT0**8 \
+ +1.6669*logT0**7 \
+ -0.3854*logT0**6 \
+ -3.3604*logT0**5 \
+ +0.4728*logT0**4 \
+ +4.5774*logT0**3 \
+ -2.3661*logT0**2 \
+ -1.6667*logT0**1 \
+ -0.2193*logT0)))
+def _convertChandraEmissivity(data):
+ return 1.0 #1.0e-23*0.76**2
+add_field("ChandraEmissivity", function=_ChandraEmissivity,
+ convert_function=_convertChandraEmissivity,
+ projection_conversion="1")
+
def _XRayEmissivity(field, data):
return ((data["Density"].astype('float64')**2.0) \
*data["Temperature"]**0.5)
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -171,16 +171,16 @@
# We set up fields for both TotalEnergy and Total_Energy in the known fields
# lists. Note that this does not mean these will be the used definitions.
add_enzo_field("TotalEnergy", function=NullFunc,
- display_name = "\mathrm{Total}\/\mathrm{Energy}",
+ display_name = "\rm{Total}\/\rm{Energy}",
units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
add_enzo_field("Total_Energy", function=NullFunc,
- display_name = "\mathrm{Total}\/\mathrm{Energy}",
+ display_name = "\rm{Total}\/\rm{Energy}",
units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
def _Total_Energy(field, data):
return data["TotalEnergy"] / _convertEnergy(data)
add_field("Total_Energy", function=_Total_Energy,
- display_name = "\mathrm{Total}\/\mathrm{Energy}",
+ display_name = "\rm{Total}\/\rm{Energy}",
units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
def _NumberDensity(field, data):
@@ -247,7 +247,7 @@
for field in ['Bx','By','Bz']:
f = KnownEnzoFields[field]
f._convert_function=_convertBfield
- f._units=r"\mathrm{Gau\ss}"
+ f._units=r"\rm{Gauss}"
f.take_log=False
def _convertRadiation(data):
@@ -447,14 +447,14 @@
return data['star_creation_time']
def _ConvertEnzoTimeYears(data):
return data.pf.time_units['years']
-add_field('StarCreationTimeYears', units=r"\mathrm{yr}",
+add_field('StarCreationTimeYears', units=r"\rm{yr}",
function=_StarCreationTime,
convert_function=_ConvertEnzoTimeYears,
projection_conversion="1")
def _StarDynamicalTime(field, data):
return data['star_dynamical_time']
-add_field('StarDynamicalTimeYears', units=r"\mathrm{yr}",
+add_field('StarDynamicalTimeYears', units=r"\rm{yr}",
function=_StarDynamicalTime,
convert_function=_ConvertEnzoTimeYears,
projection_conversion="1")
@@ -466,7 +466,7 @@
data.pf.current_time - \
data['StarCreationTimeYears'][with_stars]
return star_age
-add_field('StarAgeYears', units=r"\mathrm{yr}",
+add_field('StarAgeYears', units=r"\rm{yr}",
function=_StarAge,
projection_conversion="1")
@@ -476,20 +476,12 @@
add_field('IsStarParticle', function=_IsStarParticle,
particle_type = True)
-def _convertBfield(data):
- return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
-for field in ['Bx','By','Bz']:
- f = KnownEnzoFields[field]
- f._convert_function=_convertBfield
- f._units=r"\mathrm{Gauss}"
- f.take_log=False
-
def _Bmag(field, data):
""" magnitude of bvec
"""
return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\mathrm{Gauss}")
+add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\rm{Gauss}")
# Particle functions
@@ -645,17 +637,3 @@
add_enzo_1d_field("z-velocity", function=_zvel)
add_enzo_1d_field("y-velocity", function=_yvel)
-def _convertBfield(data):
- return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
-for field in ['Bx','By','Bz']:
- f = KnownEnzoFields[field]
- f._convert_function=_convertBfield
- f._units=r"\mathrm{Gauss}"
- f.take_log=False
-
-def _Bmag(field, data):
- """ magnitude of bvec
- """
- return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
-
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\mathrm{Gauss}")
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -48,7 +48,7 @@
r"""Class for creating TimeSeriesData object from an Enzo
simulation parameter file.
"""
- def __init__(self, parameter_filename):
+ def __init__(self, parameter_filename, find_outputs=False):
r"""Initialize an Enzo Simulation object.
Upon creation, the parameter file is parsed and the time and redshift
@@ -59,6 +59,14 @@
parameter_filename : str
The simulation parameter file.
+ find_outputs : bool
+ If True, subdirectories within the GlobalDir directory are
+ searched one by one for datasets. Time and redshift
+ information are gathered by temporarily instantiating each
+ dataset. This can be used when simulation data was created
+ in a non-standard way, making it difficult to guess the
+ corresponding time and redshift information.
+ Default: False.
Examples
--------
@@ -67,14 +75,15 @@
>>> print es.all_outputs
"""
- SimulationTimeSeries.__init__(self, parameter_filename)
+ SimulationTimeSeries.__init__(self, parameter_filename,
+ find_outputs=find_outputs)
def get_time_series(self, time_data=True, redshift_data=True,
initial_time=None, final_time=None, time_units='1',
initial_redshift=None, final_redshift=None,
initial_cycle=None, final_cycle=None,
times=None, redshifts=None, tolerance=None,
- find_outputs=False, parallel=True):
+ parallel=True):
"""
Instantiate a TimeSeriesData object for a set of outputs.
@@ -145,14 +154,6 @@
given the requested times or redshifts. If None, the
nearest output is always taken.
Default: None.
- find_outputs : bool
- If True, subdirectories within the GlobalDir directory are
- searched one by one for datasets. Time and redshift
- information are gathered by temporarily instantiating each
- dataset. This can be used when simulation data was created
- in a non-standard way, making it difficult to guess the
- corresponding time and redshift information.
- Default: False.
parallel : bool/int
If True, the generated TimeSeriesData will divide the work
such that a single processor works on each dataset. If an
@@ -185,20 +186,15 @@
mylog.error('An initial or final redshift has been given for a noncosmological simulation.')
return
- # Create the set of outputs from which further selection will be done.
- if find_outputs:
- my_all_outputs = self._find_outputs()
-
+ if time_data and redshift_data:
+ my_all_outputs = self.all_outputs
+ elif time_data:
+ my_all_outputs = self.all_time_outputs
+ elif redshift_data:
+ my_all_outputs = self.all_redshift_outputs
else:
- if time_data and redshift_data:
- my_all_outputs = self.all_outputs
- elif time_data:
- my_all_outputs = self.all_time_outputs
- elif redshift_data:
- my_all_outputs = self.all_redshift_outputs
- else:
- mylog.error('Both time_data and redshift_data are False.')
- return
+ mylog.error('Both time_data and redshift_data are False.')
+ return
# Apply selection criteria to the set.
if times is not None:
@@ -354,6 +350,7 @@
for output in self.all_redshift_outputs:
output['time'] = self.enzo_cosmology.ComputeTimeFromRedshift(output['redshift']) / \
self.enzo_cosmology.TimeUnits
+ self.all_redshift_outputs.sort(key=lambda obj:obj['time'])
def _calculate_time_outputs(self):
"Calculate time outputs and their redshifts if cosmological."
@@ -401,27 +398,32 @@
self.all_time_outputs.append(output)
index += 1
- def _get_all_outputs(self):
+ def _get_all_outputs(self, find_outputs=False):
"Get all potential datasets and combine into a time-sorted list."
- if self.parameters['dtDataDump'] > 0 and \
- self.parameters['CycleSkipDataDump'] > 0:
+ # Create the set of outputs from which further selection will be done.
+ if find_outputs:
+ self._find_outputs()
+
+ elif self.parameters['dtDataDump'] > 0 and \
+ self.parameters['CycleSkipDataDump'] > 0:
mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set." % self.parameter_filename )
mylog.info(" Unable to calculate datasets. Attempting to search in the current directory")
- self.all_time_outputs = self._find_outputs()
+ self._find_outputs()
- # Get all time or cycle outputs.
- elif self.parameters['CycleSkipDataDump'] > 0:
- self._calculate_cycle_outputs()
else:
- self._calculate_time_outputs()
+ # Get all time or cycle outputs.
+ if self.parameters['CycleSkipDataDump'] > 0:
+ self._calculate_cycle_outputs()
+ else:
+ self._calculate_time_outputs()
- # Calculate times for redshift outputs.
- self._calculate_redshift_dump_times()
+ # Calculate times for redshift outputs.
+ self._calculate_redshift_dump_times()
- self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
- if self.parameters['CycleSkipDataDump'] <= 0:
- self.all_outputs.sort(key=lambda obj:obj['time'])
+ self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
+ if self.parameters['CycleSkipDataDump'] <= 0:
+ self.all_outputs.sort(key=lambda obj:obj['time'])
mylog.info("Total datasets: %d." % len(self.all_outputs))
@@ -503,14 +505,32 @@
"""
# look for time outputs.
- potential_outputs = glob.glob(os.path.join(self.parameters['GlobalDir'],
- "%s*" % self.parameters['DataDumpDir'])) + \
- glob.glob(os.path.join(self.parameters['GlobalDir'],
- "%s*" % self.parameters['RedshiftDumpDir']))
- time_outputs = []
- mylog.info("Checking %d potential time outputs." %
+ potential_time_outputs = \
+ glob.glob(os.path.join(self.parameters['GlobalDir'],
+ "%s*" % self.parameters['DataDumpDir']))
+ self.all_time_outputs = \
+ self._check_for_outputs(potential_time_outputs)
+ self.all_time_outputs.sort(key=lambda obj: obj['time'])
+
+ # look for redshift outputs.
+ potential_redshift_outputs = \
+ glob.glob(os.path.join(self.parameters['GlobalDir'],
+ "%s*" % self.parameters['RedshiftDumpDir']))
+ self.all_redshift_outputs = \
+ self._check_for_outputs(potential_redshift_outputs)
+ self.all_redshift_outputs.sort(key=lambda obj: obj['time'])
+
+ self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
+ self.all_outputs.sort(key=lambda obj: obj['time'])
+ mylog.info("Located %d total outputs." % len(self.all_outputs))
+
+ def _check_for_outputs(self, potential_outputs):
+ r"""Check a list of files to see if they are valid datasets."""
+
+ mylog.info("Checking %d potential outputs." %
len(potential_outputs))
+ my_outputs = []
for output in potential_outputs:
if self.parameters['DataDumpDir'] in output:
dir_key = self.parameters['DataDumpDir']
@@ -526,15 +546,14 @@
try:
pf = load(filename)
if pf is not None:
- time_outputs.append({'filename': filename, 'time': pf.current_time})
+ my_outputs.append({'filename': filename,
+ 'time': pf.current_time})
if pf.cosmological_simulation:
- time_outputs[-1]['redshift'] = pf.current_redshift
+ my_outputs[-1]['redshift'] = pf.current_redshift
except YTOutputNotIdentified:
mylog.error('Failed to load %s' % filename)
- mylog.info("Located %d time outputs." % len(time_outputs))
- time_outputs.sort(key=lambda obj: obj['time'])
- return time_outputs
+ return my_outputs
def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):
r"""Get datasets at or near to given values.
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -115,15 +115,9 @@
self.grid_left_edge[:,i] = DLE[i]
self.grid_right_edge[:,i] = DRE[i]
# We only go up to ND for 2D datasets
- if (f["/bounding box"][:,:,0].shape[1] == ND) :
- #FLASH 2/3 2D data
- self.grid_left_edge[:,:ND] = f["/bounding box"][:,:,0]
- self.grid_right_edge[:,:ND] = f["/bounding box"][:,:,1]
- else:
- self.grid_left_edge[:,:] = f["/bounding box"][:,:,0]
- self.grid_right_edge[:,:] = f["/bounding box"][:,:,1]
-
-
+ self.grid_left_edge[:,:ND] = f["/bounding box"][:,:ND,0]
+ self.grid_right_edge[:,:ND] = f["/bounding box"][:,:ND,1]
+
# Move this to the parameter file
try:
nxb = pf.parameters['nxb']
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -8,7 +8,7 @@
Affiliation: KIPAC/SLAC/Stanford
Homepage: http://yt-project.org/
License:
- Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.
+ Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.
All Rights Reserved.
This file is part of yt.
@@ -38,7 +38,7 @@
from yt.data_objects.static_output import \
StaticOutput
from yt.utilities.definitions import \
- sec_conversion
+ mpc_conversion, sec_conversion
from .fields import GDFFieldInfo, KnownGDFFields
from yt.data_objects.field_info_container import \
@@ -79,7 +79,7 @@
class GDFHierarchy(GridGeometryHandler):
grid = GDFGrid
-
+
def __init__(self, pf, data_style='grid_data_format'):
self.parameter_file = weakref.proxy(pf)
self.data_style = data_style
@@ -96,7 +96,7 @@
def _detect_fields(self):
self.field_list = self._fhandle['field_types'].keys()
-
+
def _setup_classes(self):
dd = self._get_data_reader_dict()
GridGeometryHandler._setup_classes(self, dd)
@@ -104,14 +104,17 @@
def _count_grids(self):
self.num_grids = self._fhandle['/grid_parent_id'].shape[0]
-
+
def _parse_hierarchy(self):
- f = self._fhandle
- dxs=[]
+ f = self._fhandle
+ dxs = []
self.grids = na.empty(self.num_grids, dtype='object')
levels = (f['grid_level'][:]).copy()
glis = (f['grid_left_index'][:]).copy()
gdims = (f['grid_dimensions'][:]).copy()
+ active_dims = ~((na.max(gdims, axis=0) == 1) &
+ (self.parameter_file.domain_dimensions == 1))
+
for i in range(levels.shape[0]):
self.grids[i] = self.grid(i, self, levels[i],
glis[i],
@@ -120,7 +123,7 @@
dx = (self.parameter_file.domain_right_edge-
self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
- dx = dx/self.parameter_file.refine_by**(levels[i])
+ dx[active_dims] = dx[active_dims]/self.parameter_file.refine_by**(levels[i])
dxs.append(dx)
dx = na.array(dxs)
self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
@@ -128,7 +131,7 @@
self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
self.grid_particle_count = f['grid_particle_count'][:]
del levels, glis, gdims
-
+
def _populate_grid_objects(self):
for g in self.grids:
g._prepare_grid()
@@ -153,13 +156,13 @@
_hierarchy_class = GDFHierarchy
_fieldinfo_fallback = GDFFieldInfo
_fieldinfo_known = KnownGDFFields
-
+
def __init__(self, filename, data_style='grid_data_format',
storage_filename = None):
StaticOutput.__init__(self, filename, data_style)
self.storage_filename = storage_filename
self.filename = filename
-
+
def _set_units(self):
"""
Generates the conversion to various physical _units based on the parameter file
@@ -172,6 +175,8 @@
self.units['1'] = 1.0
self.units['cm'] = 1.0
self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+ for unit in mpc_conversion.keys():
+ self.units[unit] = 1.0 * mpc_conversion[unit] / mpc_conversion["cm"]
for unit in sec_conversion.keys():
self.time_units[unit] = 1.0 / sec_conversion[unit]
@@ -188,12 +193,12 @@
except:
current_fields_unit = ""
self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
- units=current_fields_unit, projected_units="",
+ units=current_fields_unit, projected_units="",
convert_function=_get_convert(field_name))
self._handle.close()
del self._handle
-
+
def _parse_parameter_file(self):
self._handle = h5py.File(self.parameter_filename, "r")
sp = self._handle["/simulation_parameters"].attrs
@@ -202,7 +207,7 @@
self.domain_dimensions = sp["domain_dimensions"][:]
refine_by = sp["refine_by"]
if refine_by is None: refine_by = 2
- self.refine_by = refine_by
+ self.refine_by = refine_by
self.dimensionality = sp["dimensionality"]
self.current_time = sp["current_time"]
self.unique_identifier = sp["unique_identifier"]
@@ -223,7 +228,7 @@
self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
self._handle.close()
del self._handle
-
+
@classmethod
def _is_valid(self, *args, **kwargs):
try:
@@ -236,4 +241,4 @@
def __repr__(self):
return self.basename.rsplit(".", 1)[0]
-
+
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -28,7 +28,8 @@
StreamGrid, \
StreamHierarchy, \
StreamStaticOutput, \
- StreamHandler
+ StreamHandler, \
+ load_uniform_grid
from .fields import \
KnownStreamFields, \
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -40,6 +40,8 @@
FieldInfoContainer, NullFunc
from yt.utilities.lib import \
get_box_grids_level
+from yt.utilities.definitions import \
+ mpc_conversion, sec_conversion
from .fields import \
StreamFieldInfo, \
@@ -288,3 +290,95 @@
@classmethod
def _is_valid(cls, *args, **kwargs):
return False
+
+class StreamDictFieldHandler(dict):
+
+ @property
+ def all_fields(self): return self[0].keys()
+
+def load_uniform_grid(data, domain_dimensions, domain_size_in_cm,
+ sim_time=0.0, number_of_particles=0):
+ r"""Load a uniform grid of data into yt as a
+ :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+ This should allow a uniform grid of data to be loaded directly into yt and
+ analyzed as would any others. This comes with several caveats:
+ * Units will be incorrect unless the data has already been converted to
+ cgs.
+ * Some functions may behave oddly, and parallelism will be
+ disappointing or non-existent in most cases.
+ * Particles may be difficult to integrate.
+
+ Parameters
+ ----------
+ data : dict
+ This is a dict of numpy arrays, where the keys are the field names.
+ domain_dimensions : array_like
+ This is the domain dimensions of the grid
+ domain_size_in_cm : float
+ The size of the domain, in centimeters
+ sim_time : float, optional
+ The simulation time in seconds
+ number_of_particles : int, optional
+ If particle fields are included, set this to the number of particles
+
+ Examples
+ --------
+
+ >>> arr = na.random.random((256, 256, 256))
+ >>> data = dict(Density = arr)
+ >>> pf = load_uniform_grid(data, [256, 256, 256], 3.08e24)
+
+ """
+ sfh = StreamDictFieldHandler()
+ sfh.update({0:data})
+ domain_dimensions = na.array(domain_dimensions)
+ if na.unique(domain_dimensions).size != 1:
+ print "We don't support variably sized domains yet."
+ raise RuntimeError
+ domain_left_edge = na.zeros(3, 'float64')
+ domain_right_edge = na.ones(3, 'float64')
+ grid_left_edges = na.zeros(3, "int64").reshape((1,3))
+ grid_right_edges = na.array(domain_dimensions, "int64").reshape((1,3))
+
+ grid_levels = na.array([0], dtype='int32').reshape((1,1))
+ grid_dimensions = grid_right_edges - grid_left_edges
+
+ grid_left_edges = grid_left_edges.astype("float64")
+ grid_left_edges /= domain_dimensions*2**grid_levels
+ grid_left_edges *= domain_right_edge - domain_left_edge
+ grid_left_edges += domain_left_edge
+
+ grid_right_edges = grid_right_edges.astype("float64")
+ grid_right_edges /= domain_dimensions*2**grid_levels
+ grid_right_edges *= domain_right_edge - domain_left_edge
+ grid_right_edges += domain_left_edge
+
+ handler = StreamHandler(
+ grid_left_edges,
+ grid_right_edges,
+ grid_dimensions,
+ grid_levels,
+ na.array([-1], dtype='int64'),
+ number_of_particles*na.ones(1, dtype='int64').reshape((1,1)),
+ na.zeros(1).reshape((1,1)),
+ sfh,
+ )
+
+ handler.name = "UniformGridData"
+ handler.domain_left_edge = domain_left_edge
+ handler.domain_right_edge = domain_right_edge
+ handler.refine_by = 2
+ handler.dimensionality = 3
+ handler.domain_dimensions = domain_dimensions
+ handler.simulation_time = sim_time
+ handler.cosmology_simulation = 0
+
+ spf = StreamStaticOutput(handler)
+ spf.units["cm"] = domain_size_in_cm
+ spf.units['1'] = 1.0
+ spf.units["unitary"] = 1.0
+ box_in_mpc = domain_size_in_cm / mpc_conversion['cm']
+ for unit in mpc_conversion.keys():
+ spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+ return spf
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -98,8 +98,8 @@
#from yt.frontends.art.api import \
# ARTStaticOutput, ARTFieldInfo, add_art_field
-from yt.frontends.maestro.api import \
- MaestroStaticOutput, MaestroFieldInfo, add_maestro_field
+#from yt.frontends.maestro.api import \
+# MaestroStaticOutput, MaestroFieldInfo, add_maestro_field
from yt.analysis_modules.list_modules import \
get_available_modules, amods
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1236,7 +1236,7 @@
plt = SlicePlot(pf, ax, args.field, center=center,
width=width)
if args.grids:
- plt.draw_grids()
+ plt.annotate_grids()
if args.time:
time = pf.current_time*pf['Time']*pf['years']
plt.annotate_text((0.2,0.8), 't = %5.2e yr'%time)
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/utilities/kdtree/Makefile
--- a/yt/utilities/kdtree/Makefile
+++ b/yt/utilities/kdtree/Makefile
@@ -9,9 +9,10 @@
endif
fKD: fKD.f90 fKD.v fKD_source.f90
-# Forthon --compile_first fKD_source --no2underscores --with-numpy -g fKD fKD.f90 fKD_source.f90
+# Forthon --compile_first fKD_source --no2underscores -g fKD fKD.f90 fKD_source.f90
@echo "Using $(FORTHON) ($(FORTHON_EXE))"
- $(FORTHON) -F gfortran --compile_first fKD_source --no2underscores --with-numpy --fopt "-O3" fKD fKD_source.f90
+ $(FORTHON) -F gfortran --compile_first fKD_source --no2underscores --fopt "-O3" fKD fKD_source.f90
+ mv build/lib*/fKDpy.so .
clean:
rm -rf build fKDpy.a fKDpy.so
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -99,6 +99,8 @@
for i in metadata:
if isinstance(metadata[i], na.ndarray):
metadata[i] = metadata[i].tolist()
+ elif hasattr(metadata[i], 'dtype'):
+ metadata[i] = na.asscalar(metadata[i])
metadata['obj_type'] = self.type
if len(chunks) == 0:
chunk_info = {'chunks': []}
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -179,8 +179,7 @@
center = na.dot(mat,center)
width = width/pf.domain_width.min()
- bounds = [center[0]-width[0]/2,center[0]+width[0]/2,
- center[1]-width[1]/2,center[1]+width[1]/2]
+ bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
return (bounds,center)
@@ -206,7 +205,7 @@
Parameters
----------
- data_source : :class:`yt.data_objects.data_containers.YTOverlapProjBase` or :class:`yt.data_objects.data_containers.YTSliceBase`
+ data_source : :class:`yt.data_objects.data_containers.AMRProjBase` or :class:`yt.data_objects.data_containers.AMRSliceBase`
This is the source to be pixelized, which can be a projection or a
slice. (For cutting planes, see
`yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`.)
@@ -460,10 +459,15 @@
Log on/off.
"""
- if log:
- self._field_transform[field] = log_transform
+ if field == 'all':
+ fields = self.plots.keys()
else:
- self._field_transform[field] = linear_transform
+ fields = [field]
+ for field in fields:
+ if log:
+ self._field_transform[field] = log_transform
+ else:
+ self._field_transform[field] = linear_transform
@invalidate_plot
def set_transform(self, field, name):
@@ -474,34 +478,70 @@
@invalidate_plot
def set_cmap(self, field, cmap_name):
"""set the colormap for one of the fields
-
+
Parameters
----------
field : string
- the field to set a transform
+ the field to set the colormap
+ if field == 'all', applies to all plots.
cmap_name : string
name of the colormap
"""
- self._colorbar_valid = False
- self._colormaps[field] = cmap_name
+
+ if field is 'all':
+ fields = self.plots.keys()
+ else:
+ fields = [field]
+ for field in fields:
+ self._colorbar_valid = False
+ self._colormaps[field] = cmap_name
@invalidate_plot
- def set_zlim(self, field, zmin, zmax):
+ def set_zlim(self, field, zmin, zmax, dynamic_range=None):
"""set the scale of the colormap
-
+
Parameters
----------
field : string
- the field to set a transform
+ the field to set a colormap scale
+ if field == 'all', applies to all plots.
zmin : float
- the new minimum of the colormap scale
+ the new minimum of the colormap scale. If 'min', will
+ set to the minimum value in the current view.
zmax : float
- the new maximum of the colormap scale
+ the new maximum of the colormap scale. If 'max', will
+ set to the maximum value in the current view.
+
+ Keyword Parameters
+ ------------------
+ dynamic_range : float (default: None)
+ The dynamic range of the image.
+ If zmin == None, will set zmin = zmax / dynamic_range
+ If zmax == None, will set zmax = zmin * dynamic_range
+ When dynamic_range is specified, defaults to setting
+ zmin = zmax / dynamic_range.
"""
- self.plots[field].zmin = zmin
- self.plots[field].zmax = zmax
+ if field is 'all':
+ fields = self.plots.keys()
+ else:
+ fields = [field]
+ for field in fields:
+ myzmin = zmin
+ myzmax = zmax
+ if zmin == 'min':
+ myzmin = self.plots[field].image._A.min()
+ if zmax == 'max':
+ myzmax = self.plots[field].image._A.max()
+ if dynamic_range is not None:
+ if zmax is None:
+ myzmax = myzmin * dynamic_range
+ else:
+ myzmin = myzmax / dynamic_range
+
+ self.plots[field].zmin = myzmin
+ self.plots[field].zmax = myzmax
def setup_callbacks(self):
for key in callback_registry:
@@ -514,7 +554,7 @@
callback = invalidate_plot(apply_callback(CallbackMaker))
callback.__doc__ = CallbackMaker.__init__.__doc__
self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
-
+
def get_metadata(self, field, strip_mathml = True, return_string = True):
fval = self._frb[field]
mi = fval.min()
@@ -554,7 +594,7 @@
finfo = self.data_source._get_field_info(*field)
if ds._type_name in ("slice", "cutting"):
units = finfo.get_units()
- if ds._type_name == "proj" and (ds.weight_field is not None or
+ elif ds._type_name == "proj" and (ds.weight_field is not None or
ds.proj_style == "mip"):
units = finfo.get_units()
elif ds._type_name == "proj":
@@ -656,25 +696,32 @@
@invalidate_plot
def set_cmap(self, field, cmap):
"""set the colormap for one of the fields
-
+
Parameters
----------
field : string
the field to set a transform
+ if field == 'all', applies to all plots.
cmap_name : string
name of the colormap
"""
- self._colorbar_valid = False
- self._colormaps[field] = cmap
- if isinstance(cmap, types.StringTypes):
- if str(cmap) in yt_colormaps:
- cmap = yt_colormaps[str(cmap)]
- elif hasattr(matplotlib.cm, cmap):
- cmap = getattr(matplotlib.cm, cmap)
- if not is_colormap(cmap) and cmap is not None:
- raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
- self.plots[field].image.set_cmap(cmap)
+ if field == 'all':
+ fields = self.plots.keys()
+ else:
+ fields = [field]
+
+ for field in fields:
+ self._colorbar_valid = False
+ self._colormaps[field] = cmap
+ if isinstance(cmap, types.StringTypes):
+ if str(cmap) in yt_colormaps:
+ cmap = yt_colormaps[str(cmap)]
+ elif hasattr(matplotlib.cm, cmap):
+ cmap = getattr(matplotlib.cm, cmap)
+ if not is_colormap(cmap) and cmap is not None:
+ raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
+ self.plots[field].image.set_cmap(cmap)
def save(self,name=None):
"""saves the plot to disk.
@@ -768,7 +815,7 @@
the image centers on the location of the maximum density
cell. If set to 'c' or 'center', the plot is centered on
the middle of the domain.
- width : tuple or a float.
+ width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
@@ -787,7 +834,7 @@
the y axis. In the other two examples, code units are assumed, for example
(0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
in code units.
- origin : string
+ origin : string
The location of the origin of the plot coordinate system.
Currently, can be set to three options: 'left-domain', corresponding
to the bottom-left hand corner of the simulation domain, 'center-domain',
@@ -837,7 +884,7 @@
the image centers on the location of the maximum density
cell. If set to 'c' or 'center', the plot is centered on
the middle of the domain.
- width : tuple or a float.
+ width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
diff -r 96e6428dc1755f5346183b7f65b32857333d7ff0 -r d6d392ef14aa14222fd1a47e654213d38a031378 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -788,7 +788,7 @@
return image
def snapshot(self, fn = None, clip_ratio = None, double_check = False,
- num_threads = 0, clim = None):
+ num_threads = 0, clim = None, label = None):
r"""Ray-cast the camera.
This method instructs the camera to take a snapshot -- i.e., call the ray
@@ -815,10 +815,10 @@
sampler = self.get_sampler(args)
self.volume.initialize_source()
image = self._render(double_check, num_threads, image, sampler)
- self.save_image(fn, clim, image)
+ self.save_image(fn, clim, image, label = label)
return image
- def save_image(self, fn, clim, image):
+ def save_image(self, fn, clim, image, label = None):
if self.comm.rank is 0 and fn is not None:
# This assumes Density; this is a relatively safe assumption.
import matplotlib.figure
@@ -832,7 +832,11 @@
ax = fig.add_subplot(1,1,1,projection='hammer')
implot = ax.imshow(img, extent=(-na.pi,na.pi,-na.pi/2,na.pi/2), clip_on=False, aspect=0.5)
cb = fig.colorbar(implot, orientation='horizontal')
- cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+
+ if label == None:
+ cb.set_label("Projected %s" % self.fields[0])
+ else:
+ cb.set_label(label)
if clim is not None: cb.set_clim(*clim)
ax.xaxis.set_ticks(())
ax.yaxis.set_ticks(())
@@ -1495,7 +1499,7 @@
return image[:,0,0]
def plot_allsky_healpix(image, nside, fn, label = "", rotation = None,
- take_log = True, resolution=512):
+ take_log = True, resolution=512, cmin=None, cmax=None):
import matplotlib.figure
import matplotlib.backends.backend_agg
if rotation is None: rotation = na.eye(3).astype("float64")
@@ -1507,7 +1511,7 @@
if take_log: func = na.log10
else: func = lambda a: a
implot = ax.imshow(func(img), extent=(-na.pi,na.pi,-na.pi/2,na.pi/2),
- clip_on=False, aspect=0.5)
+ clip_on=False, aspect=0.5, vmin=cmin, vmax=cmax)
cb = fig.colorbar(implot, orientation='horizontal')
cb.set_label(label)
ax.xaxis.set_ticks(())
Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list