[yt-svn] commit/yt: ngoldbaum: Merged in ngoldbaum/yt (pull request #2285)
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Wed Jul 20 10:34:15 PDT 2016
1 new commit in yt:
https://bitbucket.org/yt_analysis/yt/commits/f2508397629e/
Changeset: f2508397629e
Branch: yt
User: ngoldbaum
Date: 2016-07-20 17:33:43+00:00
Summary: Merged in ngoldbaum/yt (pull request #2285)
Fix issues with opaque sources fading volume renderings. Closes #1129. Closes #1202. Closes #1194.
Affected #: 14 files
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -303,6 +303,26 @@
.. yt_cookbook:: vol-annotated.py
+.. _cookbook-vol-points:
+
+Volume Rendering with Points
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with point
+sources. This could represent star or dark matter particles, for example.
+
+.. yt_cookbook:: vol-points.py
+
+.. _cookbook-vol-lines:
+
+Volume Rendering with Lines
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with line
+sources.
+
+.. yt_cookbook:: vol-lines.py
+
.. _cookbook-opengl_vr:
Advanced Interactive Data Visualization
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -1,22 +1,20 @@
import yt
-import numpy as np
-from yt.visualization.volume_rendering.api import BoxSource, CoordinateVectorSource
# Load the dataset.
ds = yt.load("Enzo_64/DD0043/data0043")
-sc = yt.create_scene(ds, ('gas','density'))
-sc.get_source(0).transfer_function.grey_opacity=True
+sc = yt.create_scene(ds, ('gas', 'density'))
-sc.annotate_domain(ds)
-sc.render()
-sc.save("%s_vr_domain.png" % ds)
+# You may need to adjust the alpha values to get a rendering with good contrast
+# For annotate_domain, the fourth color value is alpha.
-sc.annotate_grids(ds)
-sc.render()
-sc.save("%s_vr_grids.png" % ds)
+# Draw the domain boundary
+sc.annotate_domain(ds, color=[1, 1, 1, 0.01])
+sc.save("%s_vr_domain.png" % ds, sigma_clip=4)
-# Here we can draw the coordinate vectors on top of the image by processing
-# it through the camera. Then save it out.
-sc.annotate_axes()
-sc.render()
-sc.save("%s_vr_coords.png" % ds)
+# Draw the grid boundaries
+sc.annotate_grids(ds, alpha=0.01)
+sc.save("%s_vr_grids.png" % ds, sigma_clip=4)
+
+# Draw a coordinate axes triad
+sc.annotate_axes(alpha=0.01)
+sc.save("%s_vr_coords.png" % ds, sigma_clip=4)
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f doc/source/cookbook/vol-annotated.py
--- a/doc/source/cookbook/vol-annotated.py
+++ b/doc/source/cookbook/vol-annotated.py
@@ -1,75 +1,29 @@
-#!/usr/bin/env python
+import yt
-import numpy as np
-import pylab
+ds = yt.load('Enzo_64/DD0043/data0043')
-import yt
-import yt.visualization.volume_rendering.old_camera as vr
+sc = yt.create_scene(ds, lens_type='perspective')
-ds = yt.load("maestro_subCh_plt00248")
+source = sc[0]
-dd = ds.all_data()
+source.set_field('density')
+source.set_log(True)
-# field in the dataset we will visualize
-field = ('boxlib', 'radial_velocity')
+# Set up the camera parameters: focus, width, resolution, and image orientation
+sc.camera.focus = ds.domain_center
+sc.camera.resolution = 1024
+sc.camera.north_vector = [0, 0, 1]
+sc.camera.position = [1.7, 1.7, 1.7]
-# the values we wish to highlight in the rendering. We'll put a Gaussian
-# centered on these with width sigma
-vals = [-1.e7, -5.e6, -2.5e6, 2.5e6, 5.e6, 1.e7]
-sigma = 2.e5
+# You may need to adjust the alpha values to get an image with good contrast.
+# For the annotate_domain call, the fourth value in the color tuple is the
+# alpha value.
+sc.annotate_axes(alpha=.02)
+sc.annotate_domain(ds, color=[1, 1, 1, .01])
-mi, ma = min(vals), max(vals)
+text_string = "T = {} Gyr".format(float(ds.current_time.to('Gyr')))
-# Instantiate the ColorTransferfunction.
-tf = yt.ColorTransferFunction((mi, ma))
-
-for v in vals:
- tf.sample_colormap(v, sigma**2, colormap="coolwarm")
-
-
-# volume rendering requires periodic boundaries. This dataset has
-# solid walls. We need to hack it for now (this will be fixed in
-# a later yt)
-ds.periodicity = (True, True, True)
-
-
-# Set up the camera parameters: center, looking direction, width, resolution
-c = np.array([0.0, 0.0, 0.0])
-L = np.array([1.0, 1.0, 1.2])
-W = 1.5*ds.domain_width
-N = 720
-
-# +z is "up" for our dataset
-north=[0.0,0.0,1.0]
-
-# Create a camera object
-cam = vr.Camera(c, L, W, N, transfer_function=tf, ds=ds,
- no_ghost=False, north_vector=north,
- fields = [field], log_fields = [False])
-
-im = cam.snapshot()
-
-# add an axes triad
-cam.draw_coordinate_vectors(im)
-
-# add the domain box to the image
-nim = cam.draw_domain(im)
-
-# increase the contrast -- for some reason, the enhance default
-# to save_annotated doesn't do the trick
-max_val = im[:,:,:3].std() * 4.0
-nim[:,:,:3] /= max_val
-
-# we want to write the simulation time on the figure, so create a
-# figure and annotate it
-f = pylab.figure()
-
-pylab.text(0.2, 0.85, "{:.3g} s".format(float(ds.current_time.d)),
- transform=f.transFigure, color="white")
-
-# tell the camera to use our figure
-cam._render_figure = f
-
-# save annotated -- this added the transfer function values,
-# and the clear_fig=False ensures it writes onto our existing figure.
-cam.save_annotated("vol_annotated.png", nim, dpi=145, clear_fig=False)
+# save an annotated version of the volume rendering including a representation
+# of the transfer function and a nice label showing the simulation time.
+sc.save_annotated("vol_annotated.png", sigma_clip=6,
+ text_annotate=[[(.1, 1.05), text_string]])
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f doc/source/cookbook/vol-lines.py
--- /dev/null
+++ b/doc/source/cookbook/vol-lines.py
@@ -0,0 +1,22 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import LineSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+nlines = 50
+vertices = (np.random.random([nlines, 2, 3]) - 0.5) * 200 * kpc
+colors = np.random.random([nlines, 4])
+colors[:, 3] = 0.1
+
+lines = LineSource(vertices, colors)
+sc.add_source(lines)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=4.0)
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f doc/source/cookbook/vol-points.py
--- /dev/null
+++ b/doc/source/cookbook/vol-points.py
@@ -0,0 +1,29 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import PointSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+npoints = 1000
+
+# Random particle positions
+vertices = np.random.random([npoints, 3])*200*kpc
+
+# Random colors
+colors = np.random.random([npoints, 4])
+
+# Set alpha value to something that produces a good contrast with the volume
+# rendering
+colors[:, 3] = 0.1
+
+points = PointSource(vertices, colors=colors)
+sc.add_source(points)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=5)
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -129,6 +129,9 @@
vertices. For instance, lines can be used to draw outlines of regions or
continents.
+Worked examples of using the ``LineSource`` and ``PointSource`` are available at
+:ref:`cookbook-vol-points` and :ref:`cookbook-vol-lines`.
+
.. _volume_rendering_annotations:
Annotations
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f setup.py
--- a/setup.py
+++ b/setup.py
@@ -32,14 +32,6 @@
except pkg_resources.DistributionNotFound:
pass # yay!
-setuptools_ver = \
- LooseVersion(pkg_resources.get_distribution("setuptools").version)
-if setuptools_ver < LooseVersion("18.0"):
- print("Your setuptools version is too old to properly handle cython extensions.")
- print("Please update setuptools before proceeding:")
- print(" pip install -U setuptools")
- sys.exit(1)
-
MAPSERVER_FILES = []
MAPSERVER_DIRS = [
"",
@@ -378,11 +370,11 @@
package_data = {'':['*.pxd']},
setup_requires=[
'numpy',
- 'cython>=0.22',
+ 'cython>=0.24',
],
install_requires=[
'matplotlib',
- 'setuptools>=18.0',
+ 'setuptools>=19.6',
'sympy',
'numpy',
'IPython',
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -50,7 +50,7 @@
local_tipsy_001:
- yt/frontends/tipsy/tests/test_outputs.py
- local_varia_001:
+ local_varia_002:
- yt/analysis_modules/radmc3d_export
- yt/frontends/moab/tests/test_c5.py
- yt/analysis_modules/photon_simulator/tests/test_spectra.py
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -57,9 +57,9 @@
cdef np.float64_t tl, temp_x, temp_y = -1
if max_t > 1.0: max_t = 1.0
direction = -1
- if vc.left_edge[0] <= v_pos[0] and v_pos[0] <= vc.right_edge[0] and \
- vc.left_edge[1] <= v_pos[1] and v_pos[1] <= vc.right_edge[1] and \
- vc.left_edge[2] <= v_pos[2] and v_pos[2] <= vc.right_edge[2]:
+ if vc.left_edge[0] <= v_pos[0] and v_pos[0] < vc.right_edge[0] and \
+ vc.left_edge[1] <= v_pos[1] and v_pos[1] < vc.right_edge[1] and \
+ vc.left_edge[2] <= v_pos[2] and v_pos[2] < vc.right_edge[2]:
intersect_t = 0.0
direction = 3
for i in range(3):
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -27,6 +27,7 @@
from libc.string cimport strcmp
from cython.view cimport memoryview
+from cython.view cimport array as cvarray
from cpython cimport buffer
@@ -327,11 +328,12 @@
cdef int nx = image.shape[0]
cdef int ny = image.shape[1]
cdef int nl = xs.shape[0]
- cdef np.float64_t alpha[4]
- cdef int i, j
+ cdef np.float64_t[:] alpha
+ cdef int i, j, c
cdef int dx, dy, sx, sy, e2, err
cdef np.int64_t x0, x1, y0, y1, yi0
cdef np.float64_t z0, z1, dzx, dzy
+ alpha = np.zeros(4)
for j in range(0, nl, 2):
# From wikipedia http://en.wikipedia.org/wiki/Bresenham's_line_algorithm
x0 = xs[j]
@@ -348,8 +350,11 @@
if crop == 1 and (dx > nx/2.0 or dy > ny/2.0):
continue
- for i in range(4):
- alpha[i] = colors[j/points_per_color, i]
+ c = j/points_per_color/2
+
+ for i in range(3):
+ alpha[i] = colors[c, i] * colors[c, 3]
+ alpha[3] = colors[c, 3]
if x0 < x1:
sx = 1
@@ -417,19 +422,22 @@
cdef int nx = image.shape[0]
cdef int ny = image.shape[1]
cdef int nl = xs.shape[0]
- cdef np.float64_t alpha[4]
+ cdef np.float64_t[:] alpha
cdef np.float64_t talpha
- cdef int i, j
+ cdef int i, j, c
cdef np.int64_t x0, y0, yi0
cdef np.float64_t z0
+ alpha = np.zeros(4)
for j in range(0, nl):
x0 = xs[j]
y0 = ys[j]
z0 = zs[j]
if (x0 < 0 or x0 >= nx): continue
if (y0 < 0 or y0 >= ny): continue
- for i in range(4):
- alpha[i] = colors[j/points_per_color, i]
+ c = j/points_per_color
+ for i in range(3):
+ alpha[i] = colors[c, i] * colors[c, 3]
+ alpha[3] = colors[c, 3]
if flip:
yi0 = ny - y0
else:
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -471,10 +471,10 @@
image.shape = camera.resolution[0], camera.resolution[1], 4
# If the call is from VR, the image is rotated by 180 to get correct
# up direction
- if call_from_VR is True:
+ if call_from_VR is True:
image = np.rot90(image, k=2)
if self.transfer_function.grey_opacity is False:
- image[:, :, 3] = 1.0
+ image[:, :, 3] = 1
return image
def __repr__(self):
@@ -811,8 +811,8 @@
Parameters
----------
positions: array, shape (N, 3)
- These positions, in data-space coordinates, are the points to be
- added to the scene.
+ The positions of points to be added to the scene. If specified with no
+ units, the positions will be assumed to be in code units.
colors : array, shape (N, 4), optional
The colors of the points, including an alpha channel, in floating
point running from 0..1.
@@ -829,18 +829,19 @@
>>> import yt
>>> import numpy as np
>>> from yt.visualization.volume_rendering.api import PointSource
+ >>> from yt.units import kpc
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-
+
>>> im, sc = yt.volume_render(ds)
-
+
>>> npoints = 1000
- >>> vertices = np.random.random([npoints, 3])
+ >>> vertices = np.random.random([npoints, 3]) * 1000 * kpc
>>> colors = np.random.random([npoints, 4])
>>> colors[:,3] = 1.0
>>> points = PointSource(vertices, colors=colors)
>>> sc.add_source(points)
-
+
>>> im = sc.render()
"""
@@ -858,7 +859,6 @@
# If colors aren't individually set, make black with full opacity
if colors is None:
colors = np.ones((len(positions), 4))
- colors[:, 3] = 1.
self.colors = colors
self.color_stride = color_stride
@@ -912,19 +912,25 @@
This class provides a mechanism for adding lines to a scene; these
points will be opaque, and can also be colored.
+ .. note::
+
+ If adding a LineSource to your rendering causes the image to appear
+ blank or fades a VolumeSource, try lowering the values specified in
+ the alpha channel of the ``colors`` array.
+
Parameters
----------
positions: array, shape (N, 2, 3)
- These positions, in data-space coordinates, are the starting and
- stopping points for each pair of lines. For example,
- positions[0][0] and positions[0][1] would give the (x, y, z)
+ The positions of the starting and stopping points for each line.
+ For example, positions[0][0] and positions[0][1] would give the (x, y, z)
coordinates of the beginning and end points of the first line,
- respectively.
+ respectively. If specified with no units, assumed to be in code units.
colors : array, shape (N, 4), optional
The colors of the points, including an alpha channel, in floating
- point running from 0..1. Note that they correspond to the line
- segment succeeding each point; this means that strictly speaking
- they need only be (N-1) in length.
+ point running from 0..1. The four channels correspond to r, g, b, and
+ alpha values. Note that they correspond to the line segment succeeding
+ each point; this means that strictly speaking they need only be (N-1)
+ in length.
color_stride : int, optional
The stride with which to access the colors when putting them on the
scene.
@@ -938,20 +944,21 @@
>>> import yt
>>> import numpy as np
>>> from yt.visualization.volume_rendering.api import LineSource
+ >>> from yt.units import kpc
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-
+
>>> im, sc = yt.volume_render(ds)
-
- >>> npoints = 100
- >>> vertices = np.random.random([npoints, 2, 3])
- >>> colors = np.random.random([npoints, 4])
+
+ >>> nlines = 4
+ >>> vertices = np.random.random([nlines, 2, 3]) * 600 * kpc
+ >>> colors = np.random.random([nlines, 4])
>>> colors[:,3] = 1.0
-
+
>>> lines = LineSource(vertices, colors)
>>> sc.add_source(lines)
>>> im = sc.render()
-
+
"""
_image = None
@@ -974,7 +981,6 @@
# If colors aren't individually set, make black with full opacity
if colors is None:
colors = np.ones((len(positions), 4))
- colors[:, 3] = 1.
self.colors = colors
self.color_stride = color_stride
@@ -1016,14 +1022,15 @@
py = py.astype('int64')
if len(px.shape) == 1:
- zlines(empty, z, px, py, dz, self.colors, self.color_stride)
+ zlines(empty, z, px, py, dz, self.colors.astype('float64'),
+ self.color_stride)
else:
# For stereo-lens, two sets of pos for each eye are contained
# in px...pz
- zlines(empty, z, px[0,:], py[0,:], dz[0,:], self.colors,
- self.color_stride)
- zlines(empty, z, px[1,:], py[1,:], dz[1,:], self.colors,
- self.color_stride)
+ zlines(empty, z, px[0, :], py[0, :], dz[0, :],
+ self.colors.astype('float64'), self.color_stride)
+ zlines(empty, z, px[1, :], py[1, :], dz[1, :],
+ self.colors.astype('float64'), self.color_stride)
self.zbuffer = zbuffer
return zbuffer
@@ -1180,7 +1187,7 @@
colors = apply_colormap(
levels*1.0,
color_bounds=[0, self.data_source.ds.index.max_level],
- cmap_name=cmap)[0, :, :]*alpha/255.
+ cmap_name=cmap)[0, :, :]/255.
colors[:, 3] = alpha
order = [0, 1, 1, 2, 2, 3, 3, 0]
@@ -1230,9 +1237,9 @@
# If colors aren't individually set, make black with full opacity
if colors is None:
colors = np.zeros((3, 4))
- colors[0, 0] = alpha # x is red
- colors[1, 1] = alpha # y is green
- colors[2, 2] = alpha # z is blue
+ colors[0, 0] = 1.0 # x is red
+ colors[1, 1] = 1.0 # y is green
+ colors[2, 2] = 1.0 # z is blue
colors[:, 3] = alpha
self.colors = colors
self.color_stride = 2
@@ -1316,14 +1323,15 @@
py = py.astype('int64')
if len(px.shape) == 1:
- zlines(empty, z, px, py, dz, self.colors, self.color_stride)
+ zlines(empty, z, px, py, dz, self.colors.astype('float64'),
+ self.color_stride)
else:
# For stereo-lens, two sets of pos for each eye are contained
# in px...pz
- zlines(empty, z, px[0,:], py[0,:], dz[0,:], self.colors,
- self.color_stride)
- zlines(empty, z, px[1,:], py[1,:], dz[1,:], self.colors,
- self.color_stride)
+ zlines(empty, z, px[0, :], py[0, :], dz[0, :],
+ self.colors.astype('float64'), self.color_stride)
+ zlines(empty, z, px[1, :], py[1, :], dz[1, :],
+ self.colors.astype('float64'), self.color_stride)
# Set the new zbuffer
self.zbuffer = zbuffer
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -34,7 +34,9 @@
GridSource, \
RenderSource, \
MeshSource, \
- VolumeSource
+ VolumeSource, \
+ PointSource, \
+ LineSource
from .zbuffer_array import ZBuffer
from yt.extern.six.moves import builtins
from yt.utilities.exceptions import YTNotInsideNotebook
@@ -158,6 +160,11 @@
"Line annotation sources are not supported for %s."
% (type(self.camera.lens).__name__), )
+ if isinstance(render_source, (LineSource, PointSource)):
+ if isinstance(render_source.positions, YTArray):
+ render_source.positions = \
+ self.arr(render_source.positions).in_units('code_length').d
+
self.sources[keyname] = render_source
return self
@@ -330,10 +337,11 @@
A format specifier (e.g., label_fmt="%.2g") to use in formatting
the data values that label the transfer function colorbar.
text_annotate : list of iterables
- Any text that you wish to display on the image. This should be an
- list of a tuple of coordinates (in normalized figure coordinates),
- the text to display, and, optionally, a dictionary of keyword/value
- pairs to pass through to the matplotlib text() function.
+ Any text that you wish to display on the image. This should be a
+ list containing a tuple of coordinates (in normalized figure
+ coordinates), the text to display, and, optionally, a dictionary of
+ keyword/value pairs to pass through to the matplotlib text()
+ function.
Each item in the main list is a separate string to write.
@@ -385,12 +393,12 @@
rs = rensources[0]
tf = rs.transfer_function
label = rs.data_source.ds._get_field_info(rs.field).get_label()
- if rs.data_source.ds._get_field_info(rs.field).take_log:
+ if rs.log_field:
label = r'$\rm{log}\ $' + label
ax = self._show_mpl(self._last_render.swapaxes(0, 1),
sigma_clip=sigma_clip, dpi=dpi)
- self._annotate(ax.axes, tf, label=label, label_fmt=label_fmt)
+ self._annotate(ax.axes, tf, rs, label=label, label_fmt=label_fmt)
plt.tight_layout()
# any text?
@@ -434,7 +442,7 @@
return axim
- def _annotate(self, ax, tf, label="", label_fmt=None):
+ def _annotate(self, ax, tf, source, label="", label_fmt=None):
import matplotlib.pyplot as plt
ax.get_xaxis().set_visible(False)
ax.get_xaxis().set_ticks([])
@@ -442,7 +450,9 @@
ax.get_yaxis().set_ticks([])
cb = plt.colorbar(ax.images[0], pad=0.0, fraction=0.05,
drawedges=True, shrink=0.75)
- tf.vert_cbar(ax=cb.ax, label=label, label_fmt=label_fmt)
+ tf.vert_cbar(ax=cb.ax, label=label, label_fmt=label_fmt,
+ resolution=self.camera.resolution[0],
+ log_scale=source.log_field)
def _validate(self):
r"""Validate the current state of the scene."""
@@ -649,7 +659,7 @@
"""
box_source = BoxSource(ds.domain_left_edge,
ds.domain_right_edge,
- color=None)
+ color=color)
self.add_source(box_source)
return self
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -47,7 +47,7 @@
self.log = False
self.tf = None
self.bounds = None
- self.grey_opacity = True
+ self.grey_opacity = False
self.profiles = {}
def set_bounds(self, bounds=None):
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -554,7 +554,8 @@
ax.set_ylabel("Opacity")
ax.set_xlabel("Value")
- def vert_cbar(self, ax=None, label=None, label_fmt=None):
+ def vert_cbar(self, resolution, log_scale, ax=None, label=None,
+ label_fmt=None):
r"""Display an image of the transfer function
This function loads up matplotlib and displays the current transfer function.
@@ -603,6 +604,8 @@
ax.yaxis.set_ticks(xticks)
def x_format(x, pos):
val = x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size-1) + self.alpha.x[0]
+ if log_scale is True:
+ val = 10**val
if label_fmt is None:
if abs(val) < 1.e-3 or abs(val) > 1.e4:
if not val == 0.0:
@@ -626,7 +629,7 @@
ax.get_xaxis().set_ticks([])
ax.set_ylim(visible[0], visible[-1])
ax.tick_params(axis='y', colors='white', size=10)
- ax.set_ylabel(label, color='white')
+ ax.set_ylabel(label, color='white', size=10*resolution/512.0)
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list