[yt-svn] commit/yt: 16 new changesets
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Wed Jul 20 10:34:18 PDT 2016
16 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/a49f6de5e391/
Changeset: a49f6de5e391
Branch: yt
User: ngoldbaum
Date: 2016-07-16 18:31:19+00:00
Summary: Improve documentation for PointSource and LineSource. Closes #1194.
Affected #: 6 files
diff -r e7aabcd611651728be2dca5e817883f99c75a4a5 -r a49f6de5e3910cb10870e76ecb016901e91bbdb7 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -303,6 +303,26 @@
.. yt_cookbook:: vol-annotated.py
+.. _cookbook-vol-points:
+
+Volume Rendering with Points
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrate how to make a volume rendering composited with point
+sources. This could represent star or dark matter particles, for example.
+
+.. yt_cookbook:: vol-points.py
+
+.. _cookbook-vol-lines:
+
+Volume Rendering with Lines
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrate how to make a volume rendering composited with point
+sources.
+
+.. yt_cookbook:: vol-lines.py
+
.. _cookbook-opengl_vr:
Advanced Interactive Data Visualization
diff -r e7aabcd611651728be2dca5e817883f99c75a4a5 -r a49f6de5e3910cb10870e76ecb016901e91bbdb7 doc/source/cookbook/vol-lines.py
--- /dev/null
+++ b/doc/source/cookbook/vol-lines.py
@@ -0,0 +1,22 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import LineSource
+from yt.units import kpc
+
+np.random.seed(1234567)
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+im, sc = yt.volume_render(ds)
+
+nlines = 50
+vertices = (np.random.random([nlines, 2, 3]) - 0.5) * 250 * kpc
+colors = np.random.random([nlines, 4])
+colors[:, 3] = 1.0
+
+lines = LineSource(vertices, colors)
+sc.add_source(lines)
+
+sc.camera.width = 300*kpc
+
+sc.save()
diff -r e7aabcd611651728be2dca5e817883f99c75a4a5 -r a49f6de5e3910cb10870e76ecb016901e91bbdb7 doc/source/cookbook/vol-points.py
--- /dev/null
+++ b/doc/source/cookbook/vol-points.py
@@ -0,0 +1,22 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import PointSource
+from yt.units import kpc
+
+np.random.seed(1234567)
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+im, sc = yt.volume_render(ds)
+
+npoints = 1000
+vertices = np.random.random([npoints, 3])*200*kpc
+colors = np.random.random([npoints, 4])
+colors[:, 3] = 1.0
+
+points = PointSource(vertices, colors=colors)
+sc.add_source(points)
+
+sc.camera.width = 300*kpc
+
+sc.save()
diff -r e7aabcd611651728be2dca5e817883f99c75a4a5 -r a49f6de5e3910cb10870e76ecb016901e91bbdb7 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -129,6 +129,9 @@
vertices. For instance, lines can be used to draw outlines of regions or
continents.
+Worked examples of using the ``LineSource`` and ``PointSource`` are available at
+:ref:`cookbook-vol-points` and :ref:`cookbook-vol-lines`.
+
.. _volume_rendering_annotations:
Annotations
diff -r e7aabcd611651728be2dca5e817883f99c75a4a5 -r a49f6de5e3910cb10870e76ecb016901e91bbdb7 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -811,8 +811,8 @@
Parameters
----------
positions: array, shape (N, 3)
- These positions, in data-space coordinates, are the points to be
- added to the scene.
+ The positions of points to be added to the scene. If specified wih no
+ units, the positions will be assumed to be in code units.
colors : array, shape (N, 4), optional
The colors of the points, including an alpha channel, in floating
point running from 0..1.
@@ -829,18 +829,19 @@
>>> import yt
>>> import numpy as np
>>> from yt.visualization.volume_rendering.api import PointSource
+ >>> from yt.units import kpc
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-
+
>>> im, sc = yt.volume_render(ds)
-
+
>>> npoints = 1000
- >>> vertices = np.random.random([npoints, 3])
+ >>> vertices = np.random.random([npoints, 3]) * 1000 * kpc
>>> colors = np.random.random([npoints, 4])
>>> colors[:,3] = 1.0
>>> points = PointSource(vertices, colors=colors)
>>> sc.add_source(points)
-
+
>>> im = sc.render()
"""
@@ -912,19 +913,25 @@
This class provides a mechanism for adding lines to a scene; these
points will be opaque, and can also be colored.
+ .. note::
+
+ If adding a LineSource to your rendering causes the image to appear
+ blank or fades a VolumeSource, try lowering the values specified in
+ the alpha channel of the ``colors`` array.
+
Parameters
----------
positions: array, shape (N, 2, 3)
- These positions, in data-space coordinates, are the starting and
- stopping points for each pair of lines. For example,
- positions[0][0] and positions[0][1] would give the (x, y, z)
+ The positions of the starting and stopping points for each line.
+ For example,positions[0][0] and positions[0][1] would give the (x, y, z)
coordinates of the beginning and end points of the first line,
- respectively.
+ respectively. If specified with no units, assumed to be in code units.
colors : array, shape (N, 4), optional
The colors of the points, including an alpha channel, in floating
- point running from 0..1. Note that they correspond to the line
- segment succeeding each point; this means that strictly speaking
- they need only be (N-1) in length.
+ point running from 0..1. The fourth channels correspond to r, g, b, and
+ alpha values. Note that they correspond to the line segment succeeding
+ each point; this means that strictly speaking they need only be (N-1)
+ in length.
color_stride : int, optional
The stride with which to access the colors when putting them on the
scene.
@@ -938,20 +945,21 @@
>>> import yt
>>> import numpy as np
>>> from yt.visualization.volume_rendering.api import LineSource
+ >>> from yt.units import kpc
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-
+
>>> im, sc = yt.volume_render(ds)
-
- >>> npoints = 100
- >>> vertices = np.random.random([npoints, 2, 3])
- >>> colors = np.random.random([npoints, 4])
+
+ >>> nlines = 4
+ >>> vertices = np.random.random([nlines, 2, 3]) * 600 * kpc
+ >>> colors = np.random.random([nlines, 4])
>>> colors[:,3] = 1.0
-
+
>>> lines = LineSource(vertices, colors)
>>> sc.add_source(lines)
>>> im = sc.render()
-
+
"""
_image = None
diff -r e7aabcd611651728be2dca5e817883f99c75a4a5 -r a49f6de5e3910cb10870e76ecb016901e91bbdb7 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -34,7 +34,9 @@
GridSource, \
RenderSource, \
MeshSource, \
- VolumeSource
+ VolumeSource, \
+ PointSource, \
+ LineSource
from .zbuffer_array import ZBuffer
from yt.extern.six.moves import builtins
from yt.utilities.exceptions import YTNotInsideNotebook
@@ -158,6 +160,10 @@
"Line annotation sources are not supported for %s."
% (type(self.camera.lens).__name__), )
+ if isinstance(render_source, (LineSource, PointSource)):
+ render_source.positions = \
+ self.arr(render_source.positions).in_units('code_length').d
+
self.sources[keyname] = render_source
return self
https://bitbucket.org/yt_analysis/yt/commits/cbe7fee11942/
Changeset: cbe7fee11942
Branch: yt
User: ngoldbaum
Date: 2016-07-16 19:06:55+00:00
Summary: Only convert to code units if we're passed a YTArray
Affected #: 1 file
diff -r a49f6de5e3910cb10870e76ecb016901e91bbdb7 -r cbe7fee1194215a1bd8bcfd37df5f7a8a587e2a9 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -161,8 +161,9 @@
% (type(self.camera.lens).__name__), )
if isinstance(render_source, (LineSource, PointSource)):
- render_source.positions = \
- self.arr(render_source.positions).in_units('code_length').d
+ if isinstance(render_source.positions, YTArray):
+ render_source.positions = \
+ self.arr(render_source.positions).in_units('code_length').d
self.sources[keyname] = render_source
https://bitbucket.org/yt_analysis/yt/commits/b47afa262952/
Changeset: b47afa262952
Branch: yt
User: ngoldbaum
Date: 2016-07-16 22:51:33+00:00
Summary: Use grey_opacity=False by default; this makes it easier to composite renderings
Affected #: 1 file
diff -r be5b44a0d960e1d4c553b1df7a2390a20623fd1d -r b47afa26295204e5fe20cd117a03ba043dc235cc yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -47,7 +47,7 @@
self.log = False
self.tf = None
self.bounds = None
- self.grey_opacity = True
+ self.grey_opacity = False
self.profiles = {}
def set_bounds(self, bounds=None):
https://bitbucket.org/yt_analysis/yt/commits/dd3adb15b431/
Changeset: dd3adb15b431
Branch: yt
User: ngoldbaum
Date: 2016-07-16 22:51:59+00:00
Summary: Fix issues with opaque sources fading volume renderings. Closes #1129
Affected #: 5 files
diff -r b47afa26295204e5fe20cd117a03ba043dc235cc -r dd3adb15b431d346b6b2a70ccde05cd19dfe899b doc/source/cookbook/vol-annotated.py
--- a/doc/source/cookbook/vol-annotated.py
+++ b/doc/source/cookbook/vol-annotated.py
@@ -1,75 +1,29 @@
-#!/usr/bin/env python
+import yt
-import numpy as np
-import pylab
+ds = yt.load('Enzo_64/DD0043/data0043')
-import yt
-import yt.visualization.volume_rendering.old_camera as vr
+sc = yt.create_scene(ds, lens_type='perspective')
-ds = yt.load("maestro_subCh_plt00248")
+source = sc[0]
-dd = ds.all_data()
+source.set_field('density')
+source.set_log(True)
-# field in the dataset we will visualize
-field = ('boxlib', 'radial_velocity')
+# Set up the camera parameters: focus, width, resolution, and image orientation
+sc.camera.focus = ds.domain_center
+sc.camera.resolution = 1024
+sc.camera.north_vector = [0, 0, 1]
+sc.camera.position = [1.7, 1.7, 1.7]
-# the values we wish to highlight in the rendering. We'll put a Gaussian
-# centered on these with width sigma
-vals = [-1.e7, -5.e6, -2.5e6, 2.5e6, 5.e6, 1.e7]
-sigma = 2.e5
+# You may need to adjust the alpha values to get an image with good contrast.
+# For the annotate_domain call, the fourth value in the color tuple is the
+# alpha value.
+sc.annotate_axes(alpha=.02)
+sc.annotate_domain(ds, color=[1, 1, 1, .01])
-mi, ma = min(vals), max(vals)
+text_string = "T = {} Gyr".format(float(ds.current_time.to('Gyr')))
-# Instantiate the ColorTransferfunction.
-tf = yt.ColorTransferFunction((mi, ma))
-
-for v in vals:
- tf.sample_colormap(v, sigma**2, colormap="coolwarm")
-
-
-# volume rendering requires periodic boundaries. This dataset has
-# solid walls. We need to hack it for now (this will be fixed in
-# a later yt)
-ds.periodicity = (True, True, True)
-
-
-# Set up the camera parameters: center, looking direction, width, resolution
-c = np.array([0.0, 0.0, 0.0])
-L = np.array([1.0, 1.0, 1.2])
-W = 1.5*ds.domain_width
-N = 720
-
-# +z is "up" for our dataset
-north=[0.0,0.0,1.0]
-
-# Create a camera object
-cam = vr.Camera(c, L, W, N, transfer_function=tf, ds=ds,
- no_ghost=False, north_vector=north,
- fields = [field], log_fields = [False])
-
-im = cam.snapshot()
-
-# add an axes triad
-cam.draw_coordinate_vectors(im)
-
-# add the domain box to the image
-nim = cam.draw_domain(im)
-
-# increase the contrast -- for some reason, the enhance default
-# to save_annotated doesn't do the trick
-max_val = im[:,:,:3].std() * 4.0
-nim[:,:,:3] /= max_val
-
-# we want to write the simulation time on the figure, so create a
-# figure and annotate it
-f = pylab.figure()
-
-pylab.text(0.2, 0.85, "{:.3g} s".format(float(ds.current_time.d)),
- transform=f.transFigure, color="white")
-
-# tell the camera to use our figure
-cam._render_figure = f
-
-# save annotated -- this added the transfer function values,
-# and the clear_fig=False ensures it writes onto our existing figure.
-cam.save_annotated("vol_annotated.png", nim, dpi=145, clear_fig=False)
+# save an annotated version of the volume rendering including a representation
+# of the transfer function and a nice label showing the simulation time.
+sc.save_annotated("vol_annotated.png", sigma_clip=6,
+ text_annotate=[[(.1, 1.05), text_string]])
diff -r b47afa26295204e5fe20cd117a03ba043dc235cc -r dd3adb15b431d346b6b2a70ccde05cd19dfe899b yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -348,8 +348,10 @@
if crop == 1 and (dx > nx/2.0 or dy > ny/2.0):
continue
- for i in range(4):
- alpha[i] = colors[j/points_per_color, i]
+ for i in range(3):
+ alpha[i] = (colors[j/points_per_color, i] *
+ colors[j/points_per_color, 3])
+ alpha[3] = colors[j/points_per_color, 3]
if x0 < x1:
sx = 1
diff -r b47afa26295204e5fe20cd117a03ba043dc235cc -r dd3adb15b431d346b6b2a70ccde05cd19dfe899b yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -471,10 +471,10 @@
image.shape = camera.resolution[0], camera.resolution[1], 4
# If the call is from VR, the image is rotated by 180 to get correct
# up direction
- if call_from_VR is True:
+ if call_from_VR is True:
image = np.rot90(image, k=2)
if self.transfer_function.grey_opacity is False:
- image[:, :, 3] = 1.0
+ image[:, :, 3] = 1
return image
def __repr__(self):
@@ -858,7 +858,6 @@
# If colors aren't individually set, make black with full opacity
if colors is None:
colors = np.ones((len(positions), 4))
- colors[:, 3] = 1.
self.colors = colors
self.color_stride = color_stride
@@ -974,7 +973,6 @@
# If colors aren't individually set, make black with full opacity
if colors is None:
colors = np.ones((len(positions), 4))
- colors[:, 3] = 1.
self.colors = colors
self.color_stride = color_stride
@@ -1016,14 +1014,15 @@
py = py.astype('int64')
if len(px.shape) == 1:
- zlines(empty, z, px, py, dz, self.colors, self.color_stride)
+ zlines(empty, z, px, py, dz, self.colors.astype('float64'),
+ self.color_stride)
else:
# For stereo-lens, two sets of pos for each eye are contained
# in px...pz
- zlines(empty, z, px[0,:], py[0,:], dz[0,:], self.colors,
- self.color_stride)
- zlines(empty, z, px[1,:], py[1,:], dz[1,:], self.colors,
- self.color_stride)
+ zlines(empty, z, px[0, :], py[0, :], dz[0, :],
+ self.colors.astype('float64'), self.color_stride)
+ zlines(empty, z, px[1, :], py[1, :], dz[1, :],
+ self.colors.astype('float64'), self.color_stride)
self.zbuffer = zbuffer
return zbuffer
@@ -1180,7 +1179,7 @@
colors = apply_colormap(
levels*1.0,
color_bounds=[0, self.data_source.ds.index.max_level],
- cmap_name=cmap)[0, :, :]*alpha/255.
+ cmap_name=cmap)[0, :, :]/255.
colors[:, 3] = alpha
order = [0, 1, 1, 2, 2, 3, 3, 0]
@@ -1230,9 +1229,9 @@
# If colors aren't individually set, make black with full opacity
if colors is None:
colors = np.zeros((3, 4))
- colors[0, 0] = alpha # x is red
- colors[1, 1] = alpha # y is green
- colors[2, 2] = alpha # z is blue
+ colors[0, 0] = 1.0 # x is red
+ colors[1, 1] = 1.0 # y is green
+ colors[2, 2] = 1.0 # z is blue
colors[:, 3] = alpha
self.colors = colors
self.color_stride = 2
@@ -1316,14 +1315,15 @@
py = py.astype('int64')
if len(px.shape) == 1:
- zlines(empty, z, px, py, dz, self.colors, self.color_stride)
+ zlines(empty, z, px, py, dz, self.colors.astype('float64'),
+ self.color_stride)
else:
# For stereo-lens, two sets of pos for each eye are contained
# in px...pz
- zlines(empty, z, px[0,:], py[0,:], dz[0,:], self.colors,
- self.color_stride)
- zlines(empty, z, px[1,:], py[1,:], dz[1,:], self.colors,
- self.color_stride)
+ zlines(empty, z, px[0, :], py[0, :], dz[0, :],
+ self.colors.astype('float64'), self.color_stride)
+ zlines(empty, z, px[1, :], py[1, :], dz[1, :],
+ self.colors.astype('float64'), self.color_stride)
# Set the new zbuffer
self.zbuffer = zbuffer
diff -r b47afa26295204e5fe20cd117a03ba043dc235cc -r dd3adb15b431d346b6b2a70ccde05cd19dfe899b yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -330,10 +330,11 @@
A format specifier (e.g., label_fmt="%.2g") to use in formatting
the data values that label the transfer function colorbar.
text_annotate : list of iterables
- Any text that you wish to display on the image. This should be an
- list of a tuple of coordinates (in normalized figure coordinates),
- the text to display, and, optionally, a dictionary of keyword/value
- pairs to pass through to the matplotlib text() function.
+ Any text that you wish to display on the image. This should be an
+ list containing a tuple of coordinates (in normalized figure
+ coordinates), the text to display, and, optionally, a dictionary of
+ keyword/value pairs to pass through to the matplotlib text()
+ function.
Each item in the main list is a separate string to write.
@@ -385,12 +386,12 @@
rs = rensources[0]
tf = rs.transfer_function
label = rs.data_source.ds._get_field_info(rs.field).get_label()
- if rs.data_source.ds._get_field_info(rs.field).take_log:
+ if rs.log_field:
label = r'$\rm{log}\ $' + label
ax = self._show_mpl(self._last_render.swapaxes(0, 1),
sigma_clip=sigma_clip, dpi=dpi)
- self._annotate(ax.axes, tf, label=label, label_fmt=label_fmt)
+ self._annotate(ax.axes, tf, rs, label=label, label_fmt=label_fmt)
plt.tight_layout()
# any text?
@@ -435,7 +436,7 @@
return axim
- def _annotate(self, ax, tf, label="", label_fmt=None):
+ def _annotate(self, ax, tf, source, label="", label_fmt=None):
import matplotlib.pyplot as plt
ax.get_xaxis().set_visible(False)
ax.get_xaxis().set_ticks([])
@@ -443,7 +444,9 @@
ax.get_yaxis().set_ticks([])
cb = plt.colorbar(ax.images[0], pad=0.0, fraction=0.05,
drawedges=True, shrink=0.75)
- tf.vert_cbar(ax=cb.ax, label=label, label_fmt=label_fmt)
+ tf.vert_cbar(ax=cb.ax, label=label, label_fmt=label_fmt,
+ resolution=self.camera.resolution[0],
+ log_scale=source.log_field)
def _validate(self):
r"""Validate the current state of the scene."""
@@ -650,7 +653,7 @@
"""
box_source = BoxSource(ds.domain_left_edge,
ds.domain_right_edge,
- color=None)
+ color=color)
self.add_source(box_source)
return self
diff -r b47afa26295204e5fe20cd117a03ba043dc235cc -r dd3adb15b431d346b6b2a70ccde05cd19dfe899b yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -554,7 +554,8 @@
ax.set_ylabel("Opacity")
ax.set_xlabel("Value")
- def vert_cbar(self, ax=None, label=None, label_fmt=None):
+ def vert_cbar(self, resolution, log_scale, ax=None, label=None,
+ label_fmt=None):
r"""Display an image of the transfer function
This function loads up matplotlib and displays the current transfer function.
@@ -603,6 +604,8 @@
ax.yaxis.set_ticks(xticks)
def x_format(x, pos):
val = x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size-1) + self.alpha.x[0]
+ if log_scale is True:
+ val = 10**val
if label_fmt is None:
if abs(val) < 1.e-3 or abs(val) > 1.e4:
if not val == 0.0:
@@ -626,7 +629,7 @@
ax.get_xaxis().set_ticks([])
ax.set_ylim(visible[0], visible[-1])
ax.tick_params(axis='y', colors='white', size=10)
- ax.set_ylabel(label, color='white')
+ ax.set_ylabel(label, color='white', size=10*resolution/512.0)
https://bitbucket.org/yt_analysis/yt/commits/4b5ef977bd96/
Changeset: 4b5ef977bd96
Branch: yt
User: ngoldbaum
Date: 2016-07-16 22:55:59+00:00
Summary: Fixing typos
Affected #: 2 files
diff -r cbe7fee1194215a1bd8bcfd37df5f7a8a587e2a9 -r 4b5ef977bd96037a010cea2d5ef3299f4a26a94b doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -308,7 +308,7 @@
Volume Rendering with Points
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This recipe demonstrate how to make a volume rendering composited with point
+This recipe demonstrates how to make a volume rendering composited with point
sources. This could represent star or dark matter particles, for example.
.. yt_cookbook:: vol-points.py
@@ -318,7 +318,7 @@
Volume Rendering with Lines
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This recipe demonstrate how to make a volume rendering composited with point
+This recipe demonstrates how to make a volume rendering composited with point
sources.
.. yt_cookbook:: vol-lines.py
diff -r cbe7fee1194215a1bd8bcfd37df5f7a8a587e2a9 -r 4b5ef977bd96037a010cea2d5ef3299f4a26a94b yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -811,7 +811,7 @@
Parameters
----------
positions: array, shape (N, 3)
- The positions of points to be added to the scene. If specified wih no
+ The positions of points to be added to the scene. If specified with no
units, the positions will be assumed to be in code units.
colors : array, shape (N, 4), optional
The colors of the points, including an alpha channel, in floating
@@ -928,7 +928,7 @@
respectively. If specified with no units, assumed to be in code units.
colors : array, shape (N, 4), optional
The colors of the points, including an alpha channel, in floating
- point running from 0..1. The fourth channels correspond to r, g, b, and
+ point running from 0..1. The four channels correspond to r, g, b, and
alpha values. Note that they correspond to the line segment succeeding
each point; this means that strictly speaking they need only be (N-1)
in length.
https://bitbucket.org/yt_analysis/yt/commits/4360491749fb/
Changeset: 4360491749fb
Branch: yt
User: ngoldbaum
Date: 2016-07-17 14:50:02+00:00
Summary: Bump varia answer number now that grey_opacity=False is the default
Affected #: 1 file
diff -r dd3adb15b431d346b6b2a70ccde05cd19dfe899b -r 4360491749fb61ee1918edcb66f064c5c224529d tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -50,7 +50,7 @@
local_tipsy_001:
- yt/frontends/tipsy/tests/test_outputs.py
- local_varia_001:
+ local_varia_002:
- yt/analysis_modules/radmc3d_export
- yt/frontends/moab/tests/test_c5.py
- yt/analysis_modules/photon_simulator/tests/test_spectra.py
https://bitbucket.org/yt_analysis/yt/commits/a4ff0334210d/
Changeset: a4ff0334210d
Branch: yt
User: ngoldbaum
Date: 2016-07-17 14:52:36+00:00
Summary: Make new volume rendering cookbook recipes reproducible
Affected #: 2 files
diff -r 4b5ef977bd96037a010cea2d5ef3299f4a26a94b -r a4ff0334210d68c0cafc9337777e2f0c608c411b doc/source/cookbook/vol-lines.py
--- a/doc/source/cookbook/vol-lines.py
+++ b/doc/source/cookbook/vol-lines.py
@@ -3,11 +3,11 @@
from yt.visualization.volume_rendering.api import LineSource
from yt.units import kpc
-np.random.seed(1234567)
-
ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-im, sc = yt.volume_render(ds)
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
nlines = 50
vertices = (np.random.random([nlines, 2, 3]) - 0.5) * 250 * kpc
diff -r 4b5ef977bd96037a010cea2d5ef3299f4a26a94b -r a4ff0334210d68c0cafc9337777e2f0c608c411b doc/source/cookbook/vol-points.py
--- a/doc/source/cookbook/vol-points.py
+++ b/doc/source/cookbook/vol-points.py
@@ -3,11 +3,11 @@
from yt.visualization.volume_rendering.api import PointSource
from yt.units import kpc
-np.random.seed(1234567)
-
ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-im, sc = yt.volume_render(ds)
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
npoints = 1000
vertices = np.random.random([npoints, 3])*200*kpc
https://bitbucket.org/yt_analysis/yt/commits/9cd33e2b7f53/
Changeset: 9cd33e2b7f53
Branch: yt
User: ngoldbaum
Date: 2016-07-17 15:12:43+00:00
Summary: Merging
Affected #: 30 files
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -62,6 +62,11 @@
yt/utilities/lib/marching_cubes.c
yt/utilities/lib/png_writer.h
yt/utilities/lib/write_array.c
+yt/utilities/lib/perftools_wrap.c
+yt/utilities/lib/partitioned_grid.c
+yt/utilities/lib/volume_container.c
+yt/utilities/lib/lenses.c
+yt/utilities/lib/image_samplers.c
syntax: glob
*.pyc
*.pyd
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -22,11 +22,11 @@
Three halo finding methods exist within yt. These are:
-* :ref:`fof`: a basic friend-of-friends algorithm (e.g. `Efstathiou et al. (1985)
+* :ref:`fof_finding`: a basic friend-of-friends algorithm (e.g. `Efstathiou et al. (1985)
<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_)
-* :ref:`hop`: `Eisenstein and Hut (1998)
+* :ref:`hop_finding`: `Eisenstein and Hut (1998)
<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_.
-* :ref:`rockstar`: a 6D phase-space halo finder developed by Peter Behroozi that
+* :ref:`rockstar_finding`: a 6D phase-space halo finder developed by Peter Behroozi that
scales well and does substructure finding (`Behroozi et al.
2011 <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_)
@@ -69,6 +69,8 @@
and
:class:`~yt.analysis_modules.halo_finding.rockstar.rockstar.RockstarHaloFinder`.
+.. _fof_finding:
+
FOF
^^^
@@ -78,6 +80,8 @@
details as well as
:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`.
+.. _hop_finding:
+
HOP
^^^
@@ -107,6 +111,8 @@
full details as well as
:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`.
+.. _rockstar_finding:
+
Rockstar
^^^^^^^^
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b doc/source/cookbook/vol-annotated.py
--- a/doc/source/cookbook/vol-annotated.py
+++ b/doc/source/cookbook/vol-annotated.py
@@ -1,75 +1,29 @@
-#!/usr/bin/env python
+import yt
-import numpy as np
-import pylab
+ds = yt.load('Enzo_64/DD0043/data0043')
-import yt
-import yt.visualization.volume_rendering.old_camera as vr
+sc = yt.create_scene(ds, lens_type='perspective')
-ds = yt.load("maestro_subCh_plt00248")
+source = sc[0]
-dd = ds.all_data()
+source.set_field('density')
+source.set_log(True)
-# field in the dataset we will visualize
-field = ('boxlib', 'radial_velocity')
+# Set up the camera parameters: focus, width, resolution, and image orientation
+sc.camera.focus = ds.domain_center
+sc.camera.resolution = 1024
+sc.camera.north_vector = [0, 0, 1]
+sc.camera.position = [1.7, 1.7, 1.7]
-# the values we wish to highlight in the rendering. We'll put a Gaussian
-# centered on these with width sigma
-vals = [-1.e7, -5.e6, -2.5e6, 2.5e6, 5.e6, 1.e7]
-sigma = 2.e5
+# You may need to adjust the alpha values to get an image with good contrast.
+# For the annotate_domain call, the fourth value in the color tuple is the
+# alpha value.
+sc.annotate_axes(alpha=.02)
+sc.annotate_domain(ds, color=[1, 1, 1, .01])
-mi, ma = min(vals), max(vals)
+text_string = "T = {} Gyr".format(float(ds.current_time.to('Gyr')))
-# Instantiate the ColorTransferfunction.
-tf = yt.ColorTransferFunction((mi, ma))
-
-for v in vals:
- tf.sample_colormap(v, sigma**2, colormap="coolwarm")
-
-
-# volume rendering requires periodic boundaries. This dataset has
-# solid walls. We need to hack it for now (this will be fixed in
-# a later yt)
-ds.periodicity = (True, True, True)
-
-
-# Set up the camera parameters: center, looking direction, width, resolution
-c = np.array([0.0, 0.0, 0.0])
-L = np.array([1.0, 1.0, 1.2])
-W = 1.5*ds.domain_width
-N = 720
-
-# +z is "up" for our dataset
-north=[0.0,0.0,1.0]
-
-# Create a camera object
-cam = vr.Camera(c, L, W, N, transfer_function=tf, ds=ds,
- no_ghost=False, north_vector=north,
- fields = [field], log_fields = [False])
-
-im = cam.snapshot()
-
-# add an axes triad
-cam.draw_coordinate_vectors(im)
-
-# add the domain box to the image
-nim = cam.draw_domain(im)
-
-# increase the contrast -- for some reason, the enhance default
-# to save_annotated doesn't do the trick
-max_val = im[:,:,:3].std() * 4.0
-nim[:,:,:3] /= max_val
-
-# we want to write the simulation time on the figure, so create a
-# figure and annotate it
-f = pylab.figure()
-
-pylab.text(0.2, 0.85, "{:.3g} s".format(float(ds.current_time.d)),
- transform=f.transFigure, color="white")
-
-# tell the camera to use our figure
-cam._render_figure = f
-
-# save annotated -- this added the transfer function values,
-# and the clear_fig=False ensures it writes onto our existing figure.
-cam.save_annotated("vol_annotated.png", nim, dpi=145, clear_fig=False)
+# save an annotated version of the volume rendering including a representation
+# of the transfer function and a nice label showing the simulation time.
+sc.save_annotated("vol_annotated.png", sigma_clip=6,
+ text_annotate=[[(.1, 1.05), text_string]])
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b doc/source/cookbook/vol-lines.py
--- a/doc/source/cookbook/vol-lines.py
+++ b/doc/source/cookbook/vol-lines.py
@@ -12,7 +12,7 @@
nlines = 50
vertices = (np.random.random([nlines, 2, 3]) - 0.5) * 250 * kpc
colors = np.random.random([nlines, 4])
-colors[:, 3] = 1.0
+colors[:, 3] = 0.1
lines = LineSource(vertices, colors)
sc.add_source(lines)
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b doc/source/cookbook/vol-points.py
--- a/doc/source/cookbook/vol-points.py
+++ b/doc/source/cookbook/vol-points.py
@@ -12,7 +12,8 @@
npoints = 1000
vertices = np.random.random([npoints, 3])*200*kpc
colors = np.random.random([npoints, 4])
-colors[:, 3] = 1.0
+colors[:, 3] = 1
+colors *= 0.1
points = PointSource(vertices, colors=colors)
sc.add_source(points)
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b setup.py
--- a/setup.py
+++ b/setup.py
@@ -175,6 +175,20 @@
extra_link_args=omp_args,
depends=["yt/utilities/lib/kdtree.h",
"yt/utilities/lib/fixed_interpolator.h"]),
+ Extension("yt.utilities.lib.image_samplers",
+ ["yt/utilities/lib/image_samplers.pyx",
+ "yt/utilities/lib/fixed_interpolator.c"],
+ include_dirs=["yt/utilities/lib/"],
+ libraries=std_libs,
+ extra_compile_args=omp_args,
+ extra_link_args=omp_args,
+ depends=["yt/utilities/lib/fixed_interpolator.h"]),
+ Extension("yt.utilities.lib.partitioned_grid",
+ ["yt/utilities/lib/partitioned_grid.pyx",
+ "yt/utilities/lib/fixed_interpolator.c"],
+ include_dirs=["yt/utilities/lib/"],
+ libraries=std_libs,
+ depends=["yt/utilities/lib/fixed_interpolator.h"]),
Extension("yt.utilities.lib.element_mappings",
["yt/utilities/lib/element_mappings.pyx"],
libraries=std_libs),
@@ -187,7 +201,7 @@
"particle_mesh_operations", "depth_first_octree", "fortran_reader",
"interpolators", "misc_utilities", "basic_octree", "image_utilities",
"points_in_volume", "quad_tree", "ray_integrators", "mesh_utilities",
- "amr_kdtools"
+ "amr_kdtools", "lenses",
]
for ext_name in lib_exts:
cython_extensions.append(
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -50,7 +50,7 @@
local_tipsy_001:
- yt/frontends/tipsy/tests/test_outputs.py
- local_varia_001:
+ local_varia_002:
- yt/analysis_modules/radmc3d_export
- yt/frontends/moab/tests/test_c5.py
- yt/analysis_modules/photon_simulator/tests/test_spectra.py
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -21,7 +21,7 @@
from yt.utilities.lib.contour_finding import \
ContourTree, TileContourTree, link_node_contours, \
update_joins
-from yt.utilities.lib.grid_traversal import \
+from yt.utilities.lib.partitioned_grid import \
PartitionedGrid
def identify_contours(data_source, field, min_val, max_val,
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -22,8 +22,10 @@
from .oct_container cimport OctreeContainer, OctAllocationContainer, Oct
cimport oct_visitors
from .oct_visitors cimport cind
+from yt.utilities.lib.volume_container cimport \
+ VolumeContainer
from yt.utilities.lib.grid_traversal cimport \
- VolumeContainer, sample_function, walk_volume
+ sampler_function, walk_volume
from yt.utilities.lib.bitarray cimport ba_get_value, ba_set_value
cdef extern from "math.h":
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -28,7 +28,7 @@
from yt.utilities.lib.amr_kdtools import Node
from yt.utilities.parallel_tools.parallel_analysis_interface import \
ParallelAnalysisInterface
-from yt.utilities.lib.grid_traversal import PartitionedGrid
+from yt.utilities.lib.partitioned_grid import PartitionedGrid
from yt.utilities.math_utils import periodic_position
steps = np.array([[-1, -1, -1], [-1, -1, 0], [-1, -1, 1],
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b yt/utilities/lib/contour_finding.pyx
--- a/yt/utilities/lib/contour_finding.pyx
+++ b/yt/utilities/lib/contour_finding.pyx
@@ -26,8 +26,10 @@
from yt.geometry.oct_visitors cimport \
Oct
from .amr_kdtools cimport Node
-from .grid_traversal cimport VolumeContainer, PartitionedGrid, \
- vc_index, vc_pos_index
+from .partitioned_grid cimport \
+ PartitionedGrid
+from .volume_container cimport \
+ VolumeContainer, vc_index, vc_pos_index
import sys
cdef inline ContourID *contour_create(np.int64_t contour_id,
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b yt/utilities/lib/field_interpolation_tables.pxd
--- a/yt/utilities/lib/field_interpolation_tables.pxd
+++ b/yt/utilities/lib/field_interpolation_tables.pxd
@@ -17,8 +17,7 @@
cimport numpy as np
from yt.utilities.lib.fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, fabs
from libc.stdlib cimport malloc
-
-DEF Nch = 4
+from libc.math cimport isnormal
cdef struct FieldInterpolationTable:
# Note that we make an assumption about retaining a reference to values
@@ -34,10 +33,6 @@
int weight_table_id
int nbins
-cdef extern from "math.h":
- double expf(double x) nogil
- int isnormal(double x) nogil
-
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -17,22 +17,8 @@
import numpy as np
cimport numpy as np
cimport cython
-cimport kdtree_utils
-
-cdef struct ImageContainer:
- np.float64_t[:,:,:] vp_pos
- np.float64_t[:,:,:] vp_dir
- np.float64_t *center
- np.float64_t[:,:,:] image
- np.float64_t[:,:] zbuffer
- np.int64_t[:,:] image_used
- np.int64_t[:,:] mesh_lines
- np.float64_t pdx, pdy
- np.float64_t bounds[4]
- np.float64_t[:,:] camera_data # position, width, unit_vec[0,2]
- int nv[2]
- np.float64_t *x_vec
- np.float64_t *y_vec
+from .image_samplers cimport ImageContainer, ImageSampler
+from .volume_container cimport VolumeContainer, vc_index, vc_pos_index
ctypedef void sampler_function(
VolumeContainer *vc,
@@ -43,82 +29,11 @@
int index[3],
void *data) nogil
-ctypedef void calculate_extent_function(ImageContainer *image,
- VolumeContainer *vc, np.int64_t rv[4]) nogil
-
-cdef calculate_extent_function calculate_extent_plane_parallel
-
-ctypedef void generate_vector_info_function(ImageContainer *im,
- np.int64_t vi, np.int64_t vj,
- np.float64_t width[2],
- np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil
-
-cdef generate_vector_info_function generate_vector_info_plane_parallel
-cdef generate_vector_info_function generate_vector_info_null
-
-cdef class ImageSampler:
- cdef ImageContainer *image
- cdef sampler_function *sampler
- cdef public object acenter, aimage, ax_vec, ay_vec
- cdef public object azbuffer
- cdef public object aimage_used
- cdef public object amesh_lines
- cdef void *supp_data
- cdef np.float64_t width[3]
- cdef public object lens_type
- cdef calculate_extent_function *extent_function
- cdef generate_vector_info_function *vector_function
-
- cdef void setup(self, PartitionedGrid pg)
-
-cdef struct VolumeContainer:
- int n_fields
- np.float64_t **data
- # The mask has dimensions one fewer in each direction than data
- np.uint8_t *mask
- np.float64_t left_edge[3]
- np.float64_t right_edge[3]
- np.float64_t dds[3]
- np.float64_t idds[3]
- int dims[3]
-
-cdef class PartitionedGrid:
- cdef public object my_data
- cdef public object source_mask
- cdef public object LeftEdge
- cdef public object RightEdge
- cdef public int parent_grid_id
- cdef VolumeContainer *container
- cdef kdtree_utils.kdtree *star_list
- cdef np.float64_t star_er
- cdef np.float64_t star_sigma_num
- cdef np.float64_t star_coeff
- cdef void get_vector_field(self, np.float64_t pos[3],
- np.float64_t *vel, np.float64_t *vel_mag)
-
-ctypedef void sample_function(
- VolumeContainer *vc,
- np.float64_t v_pos[3],
- np.float64_t v_dir[3],
- np.float64_t enter_t,
- np.float64_t exit_t,
- int index[3],
- void *data) nogil
-
cdef int walk_volume(VolumeContainer *vc,
np.float64_t v_pos[3],
np.float64_t v_dir[3],
- sample_function *sampler,
+ sampler_function *sampler,
void *data,
np.float64_t *return_t = *,
np.float64_t max_t = *) nogil
-cdef inline int vc_index(VolumeContainer *vc, int i, int j, int k):
- return (i*vc.dims[1]+j)*vc.dims[2]+k
-
-cdef inline int vc_pos_index(VolumeContainer *vc, np.float64_t *spos):
- cdef int index[3]
- cdef int i
- for i in range(3):
- index[i] = <int> ((spos[i] - vc.left_edge[i]) * vc.idds[i])
- return vc_index(vc, index[0], index[1], index[2])
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -17,8 +17,6 @@
cimport numpy as np
cimport cython
#cimport healpix_interface
-cdef extern from "limits.h":
- cdef int SHRT_MAX
from libc.stdlib cimport malloc, calloc, free, abs
from libc.math cimport exp, floor, log2, \
fabs, atan, atan2, asin, cos, sin, sqrt, acos, M_PI
@@ -28,913 +26,23 @@
FIT_eval_transfer_with_light
from fixed_interpolator cimport *
-cdef extern from "platform_dep.h":
- long int lrint(double x) nogil
-
from cython.parallel import prange, parallel, threadid
-from vec3_ops cimport dot, subtract, L2_norm, fma
-
from cpython.exc cimport PyErr_CheckSignals
+from .image_samplers cimport \
+ ImageSampler, \
+ ImageContainer, \
+ VolumeRenderAccumulator
+
DEF Nch = 4
-cdef class PartitionedGrid:
-
- @cython.boundscheck(False)
- @cython.wraparound(False)
- @cython.cdivision(True)
- def __cinit__(self,
- int parent_grid_id, data,
- mask,
- np.ndarray[np.float64_t, ndim=1] left_edge,
- np.ndarray[np.float64_t, ndim=1] right_edge,
- np.ndarray[np.int64_t, ndim=1] dims,
- star_kdtree_container star_tree = None):
- # The data is likely brought in via a slice, so we copy it
- cdef np.ndarray[np.float64_t, ndim=3] tdata
- cdef np.ndarray[np.uint8_t, ndim=3] mask_data
- self.container = NULL
- self.parent_grid_id = parent_grid_id
- self.LeftEdge = left_edge
- self.RightEdge = right_edge
- self.container = <VolumeContainer *> \
- malloc(sizeof(VolumeContainer))
- cdef VolumeContainer *c = self.container # convenience
- cdef int n_fields = len(data)
- c.n_fields = n_fields
- for i in range(3):
- c.left_edge[i] = left_edge[i]
- c.right_edge[i] = right_edge[i]
- c.dims[i] = dims[i]
- c.dds[i] = (c.right_edge[i] - c.left_edge[i])/dims[i]
- c.idds[i] = 1.0/c.dds[i]
- self.my_data = data
- self.source_mask = mask
- mask_data = mask
- c.data = <np.float64_t **> malloc(sizeof(np.float64_t*) * n_fields)
- for i in range(n_fields):
- tdata = data[i]
- c.data[i] = <np.float64_t *> tdata.data
- c.mask = <np.uint8_t *> mask_data.data
- if star_tree is None:
- self.star_list = NULL
- else:
- self.set_star_tree(star_tree)
-
- def set_star_tree(self, star_kdtree_container star_tree):
- self.star_list = star_tree.tree
- self.star_sigma_num = 2.0*star_tree.sigma**2.0
- self.star_er = 2.326 * star_tree.sigma
- self.star_coeff = star_tree.coeff
-
- def __dealloc__(self):
- # The data fields are not owned by the container, they are owned by us!
- # So we don't need to deallocate them.
- if self.container == NULL: return
- if self.container.data != NULL: free(self.container.data)
- free(self.container)
-
- @cython.boundscheck(False)
- @cython.wraparound(False)
- @cython.cdivision(True)
- def integrate_streamline(self, pos, np.float64_t h, mag):
- cdef np.float64_t cmag[1]
- cdef np.float64_t k1[3]
- cdef np.float64_t k2[3]
- cdef np.float64_t k3[3]
- cdef np.float64_t k4[3]
- cdef np.float64_t newpos[3]
- cdef np.float64_t oldpos[3]
- for i in range(3):
- newpos[i] = oldpos[i] = pos[i]
- self.get_vector_field(newpos, k1, cmag)
- for i in range(3):
- newpos[i] = oldpos[i] + 0.5*k1[i]*h
-
- if not (self.LeftEdge[0] < newpos[0] and newpos[0] < self.RightEdge[0] and \
- self.LeftEdge[1] < newpos[1] and newpos[1] < self.RightEdge[1] and \
- self.LeftEdge[2] < newpos[2] and newpos[2] < self.RightEdge[2]):
- if mag is not None:
- mag[0] = cmag[0]
- for i in range(3):
- pos[i] = newpos[i]
- return
-
- self.get_vector_field(newpos, k2, cmag)
- for i in range(3):
- newpos[i] = oldpos[i] + 0.5*k2[i]*h
-
- if not (self.LeftEdge[0] <= newpos[0] and newpos[0] <= self.RightEdge[0] and \
- self.LeftEdge[1] <= newpos[1] and newpos[1] <= self.RightEdge[1] and \
- self.LeftEdge[2] <= newpos[2] and newpos[2] <= self.RightEdge[2]):
- if mag is not None:
- mag[0] = cmag[0]
- for i in range(3):
- pos[i] = newpos[i]
- return
-
- self.get_vector_field(newpos, k3, cmag)
- for i in range(3):
- newpos[i] = oldpos[i] + k3[i]*h
-
- if not (self.LeftEdge[0] <= newpos[0] and newpos[0] <= self.RightEdge[0] and \
- self.LeftEdge[1] <= newpos[1] and newpos[1] <= self.RightEdge[1] and \
- self.LeftEdge[2] <= newpos[2] and newpos[2] <= self.RightEdge[2]):
- if mag is not None:
- mag[0] = cmag[0]
- for i in range(3):
- pos[i] = newpos[i]
- return
-
- self.get_vector_field(newpos, k4, cmag)
-
- for i in range(3):
- pos[i] = oldpos[i] + h*(k1[i]/6.0 + k2[i]/3.0 + k3[i]/3.0 + k4[i]/6.0)
-
- if mag is not None:
- for i in range(3):
- newpos[i] = pos[i]
- self.get_vector_field(newpos, k4, cmag)
- mag[0] = cmag[0]
-
- @cython.boundscheck(False)
- @cython.wraparound(False)
- @cython.cdivision(True)
- cdef void get_vector_field(self, np.float64_t pos[3],
- np.float64_t *vel, np.float64_t *vel_mag):
- cdef np.float64_t dp[3]
- cdef int ci[3]
- cdef VolumeContainer *c = self.container # convenience
-
- for i in range(3):
- ci[i] = (int)((pos[i]-self.LeftEdge[i])/c.dds[i])
- dp[i] = (pos[i] - ci[i]*c.dds[i] - self.LeftEdge[i])/c.dds[i]
-
- cdef int offset = ci[0] * (c.dims[1] + 1) * (c.dims[2] + 1) \
- + ci[1] * (c.dims[2] + 1) + ci[2]
-
- vel_mag[0] = 0.0
- for i in range(3):
- vel[i] = offset_interpolate(c.dims, dp, c.data[i] + offset)
- vel_mag[0] += vel[i]*vel[i]
- vel_mag[0] = np.sqrt(vel_mag[0])
- if vel_mag[0] != 0.0:
- for i in range(3):
- vel[i] /= vel_mag[0]
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-cdef void calculate_extent_plane_parallel(ImageContainer *image,
- VolumeContainer *vc, np.int64_t rv[4]) nogil:
- # We do this for all eight corners
- cdef np.float64_t temp
- cdef np.float64_t *edges[2]
- cdef np.float64_t cx, cy
- cdef np.float64_t extrema[4]
- cdef int i, j, k
- edges[0] = vc.left_edge
- edges[1] = vc.right_edge
- extrema[0] = extrema[2] = 1e300; extrema[1] = extrema[3] = -1e300
- for i in range(2):
- for j in range(2):
- for k in range(2):
- # This should rotate it into the vector plane
- temp = edges[i][0] * image.x_vec[0]
- temp += edges[j][1] * image.x_vec[1]
- temp += edges[k][2] * image.x_vec[2]
- if temp < extrema[0]: extrema[0] = temp
- if temp > extrema[1]: extrema[1] = temp
- temp = edges[i][0] * image.y_vec[0]
- temp += edges[j][1] * image.y_vec[1]
- temp += edges[k][2] * image.y_vec[2]
- if temp < extrema[2]: extrema[2] = temp
- if temp > extrema[3]: extrema[3] = temp
- cx = cy = 0.0
- for i in range(3):
- cx += image.center[i] * image.x_vec[i]
- cy += image.center[i] * image.y_vec[i]
- rv[0] = lrint((extrema[0] - cx - image.bounds[0])/image.pdx)
- rv[1] = rv[0] + lrint((extrema[1] - extrema[0])/image.pdx)
- rv[2] = lrint((extrema[2] - cy - image.bounds[2])/image.pdy)
- rv[3] = rv[2] + lrint((extrema[3] - extrema[2])/image.pdy)
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-cdef void calculate_extent_perspective(ImageContainer *image,
- VolumeContainer *vc, np.int64_t rv[4]) nogil:
-
- cdef np.float64_t cam_pos[3]
- cdef np.float64_t cam_width[3]
- cdef np.float64_t north_vector[3]
- cdef np.float64_t east_vector[3]
- cdef np.float64_t normal_vector[3]
- cdef np.float64_t vertex[3]
- cdef np.float64_t pos1[3]
- cdef np.float64_t sight_vector[3]
- cdef np.float64_t sight_center[3]
- cdef np.float64_t corners[3][8]
- cdef float sight_vector_norm, sight_angle_cos, sight_length, dx, dy
- cdef int i, iv, px, py
- cdef int min_px, min_py, max_px, max_py
-
- min_px = SHRT_MAX
- min_py = SHRT_MAX
- max_px = -SHRT_MAX
- max_py = -SHRT_MAX
-
- # calculate vertices for 8 corners of vc
- corners[0][0] = vc.left_edge[0]
- corners[0][1] = vc.right_edge[0]
- corners[0][2] = vc.right_edge[0]
- corners[0][3] = vc.left_edge[0]
- corners[0][4] = vc.left_edge[0]
- corners[0][5] = vc.right_edge[0]
- corners[0][6] = vc.right_edge[0]
- corners[0][7] = vc.left_edge[0]
-
- corners[1][0] = vc.left_edge[1]
- corners[1][1] = vc.left_edge[1]
- corners[1][2] = vc.right_edge[1]
- corners[1][3] = vc.right_edge[1]
- corners[1][4] = vc.left_edge[1]
- corners[1][5] = vc.left_edge[1]
- corners[1][6] = vc.right_edge[1]
- corners[1][7] = vc.right_edge[1]
-
- corners[2][0] = vc.left_edge[2]
- corners[2][1] = vc.left_edge[2]
- corners[2][2] = vc.left_edge[2]
- corners[2][3] = vc.left_edge[2]
- corners[2][4] = vc.right_edge[2]
- corners[2][5] = vc.right_edge[2]
- corners[2][6] = vc.right_edge[2]
- corners[2][7] = vc.right_edge[2]
-
- # This code was ported from
- # yt.visualization.volume_rendering.lens.PerspectiveLens.project_to_plane()
- for i in range(3):
- cam_pos[i] = image.camera_data[0, i]
- cam_width[i] = image.camera_data[1, i]
- east_vector[i] = image.camera_data[2, i]
- north_vector[i] = image.camera_data[3, i]
- normal_vector[i] = image.camera_data[4, i]
-
- for iv in range(8):
- vertex[0] = corners[0][iv]
- vertex[1] = corners[1][iv]
- vertex[2] = corners[2][iv]
-
- cam_width[1] = cam_width[0] * image.nv[1] / image.nv[0]
-
- subtract(vertex, cam_pos, sight_vector)
- fma(cam_width[2], normal_vector, cam_pos, sight_center)
-
- sight_vector_norm = L2_norm(sight_vector)
-
- if sight_vector_norm != 0:
- for i in range(3):
- sight_vector[i] /= sight_vector_norm
-
- sight_angle_cos = dot(sight_vector, normal_vector)
- sight_angle_cos = fclip(sight_angle_cos, -1.0, 1.0)
-
- if acos(sight_angle_cos) < 0.5 * M_PI and sight_angle_cos != 0.0:
- sight_length = cam_width[2] / sight_angle_cos
- else:
- sight_length = sqrt(cam_width[0]**2 + cam_width[1]**2)
- sight_length = sight_length / sqrt(1.0 - sight_angle_cos**2)
-
- fma(sight_length, sight_vector, cam_pos, pos1)
- subtract(pos1, sight_center, pos1)
- dx = dot(pos1, east_vector)
- dy = dot(pos1, north_vector)
-
- px = int(image.nv[0] * 0.5 + image.nv[0] / cam_width[0] * dx)
- py = int(image.nv[1] * 0.5 + image.nv[1] / cam_width[1] * dy)
- min_px = min(min_px, px)
- max_px = max(max_px, px)
- min_py = min(min_py, py)
- max_py = max(max_py, py)
-
- rv[0] = max(min_px, 0)
- rv[1] = min(max_px, image.nv[0])
- rv[2] = max(min_py, 0)
- rv[3] = min(max_py, image.nv[1])
-
-
-# We do this for a bunch of lenses. Fallback is to grab them from the vector
-# info supplied.
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-cdef void calculate_extent_null(ImageContainer *image,
- VolumeContainer *vc, np.int64_t rv[4]) nogil:
- rv[0] = 0
- rv[1] = image.nv[0]
- rv[2] = 0
- rv[3] = image.nv[1]
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-cdef void generate_vector_info_plane_parallel(ImageContainer *im,
- np.int64_t vi, np.int64_t vj,
- np.float64_t width[2],
- # Now outbound
- np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil:
- cdef int i
- cdef np.float64_t px, py
- px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
- py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
- # atleast_3d will add to beginning and end
- v_pos[0] = im.vp_pos[0,0,0]*px + im.vp_pos[0,3,0]*py + im.vp_pos[0,9,0]
- v_pos[1] = im.vp_pos[0,1,0]*px + im.vp_pos[0,4,0]*py + im.vp_pos[0,10,0]
- v_pos[2] = im.vp_pos[0,2,0]*px + im.vp_pos[0,5,0]*py + im.vp_pos[0,11,0]
- for i in range(3): v_dir[i] = im.vp_dir[0,i,0]
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-cdef void generate_vector_info_null(ImageContainer *im,
- np.int64_t vi, np.int64_t vj,
- np.float64_t width[2],
- # Now outbound
- np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil:
- cdef int i
- for i in range(3):
- # Here's a funny thing: we use vi here because our *image* will be
- # flattened. That means that im.nv will be a better one-d offset,
- # since vp_pos has funny strides.
- v_pos[i] = im.vp_pos[vi, vj, i]
- v_dir[i] = im.vp_dir[vi, vj, i]
-
-cdef struct ImageAccumulator:
- np.float64_t rgba[Nch]
- void *supp_data
-
-cdef class ImageSampler:
- def __init__(self,
- np.float64_t[:,:,:] vp_pos,
- np.float64_t[:,:,:] vp_dir,
- np.ndarray[np.float64_t, ndim=1] center,
- bounds,
- np.ndarray[np.float64_t, ndim=3] image,
- np.ndarray[np.float64_t, ndim=1] x_vec,
- np.ndarray[np.float64_t, ndim=1] y_vec,
- np.ndarray[np.float64_t, ndim=1] width,
- *args, **kwargs):
- self.image = <ImageContainer *> calloc(sizeof(ImageContainer), 1)
- cdef np.float64_t[:,:] zbuffer
- cdef np.int64_t[:,:] image_used
- cdef np.int64_t[:,:] mesh_lines
- cdef np.float64_t[:,:] camera_data
- cdef int i
-
- camera_data = kwargs.pop("camera_data", None)
- if camera_data is not None:
- self.image.camera_data = camera_data
-
- zbuffer = kwargs.pop("zbuffer", None)
- if zbuffer is None:
- zbuffer = np.ones((image.shape[0], image.shape[1]), "float64")
-
- image_used = np.zeros((image.shape[0], image.shape[1]), "int64")
- mesh_lines = np.zeros((image.shape[0], image.shape[1]), "int64")
-
- self.lens_type = kwargs.pop("lens_type", None)
- if self.lens_type == "plane-parallel":
- self.extent_function = calculate_extent_plane_parallel
- self.vector_function = generate_vector_info_plane_parallel
- else:
- if not (vp_pos.shape[0] == vp_dir.shape[0] == image.shape[0]) or \
- not (vp_pos.shape[1] == vp_dir.shape[1] == image.shape[1]):
- msg = "Bad lens shape / direction for %s\n" % (self.lens_type)
- msg += "Shapes: (%s - %s - %s) and (%s - %s - %s)" % (
- vp_pos.shape[0], vp_dir.shape[0], image.shape[0],
- vp_pos.shape[1], vp_dir.shape[1], image.shape[1])
- raise RuntimeError(msg)
-
- if camera_data is not None and self.lens_type == 'perspective':
- self.extent_function = calculate_extent_perspective
- else:
- self.extent_function = calculate_extent_null
- self.vector_function = generate_vector_info_null
-
- self.sampler = NULL
- # These assignments are so we can track the objects and prevent their
- # de-allocation from reference counts. Note that we do this to the
- # "atleast_3d" versions. Also, note that we re-assign the input
- # arguments.
- self.image.vp_pos = vp_pos
- self.image.vp_dir = vp_dir
- self.image.image = self.aimage = image
- self.acenter = center
- self.image.center = <np.float64_t *> center.data
- self.ax_vec = x_vec
- self.image.x_vec = <np.float64_t *> x_vec.data
- self.ay_vec = y_vec
- self.image.y_vec = <np.float64_t *> y_vec.data
- self.image.zbuffer = self.azbuffer = zbuffer
- self.image.image_used = self.aimage_used = image_used
- self.image.mesh_lines = self.amesh_lines = mesh_lines
- self.image.nv[0] = image.shape[0]
- self.image.nv[1] = image.shape[1]
- for i in range(4): self.image.bounds[i] = bounds[i]
- self.image.pdx = (bounds[1] - bounds[0])/self.image.nv[0]
- self.image.pdy = (bounds[3] - bounds[2])/self.image.nv[1]
- for i in range(3):
- self.width[i] = width[i]
-
- @cython.boundscheck(False)
- @cython.wraparound(False)
- @cython.cdivision(True)
- def __call__(self, PartitionedGrid pg, int num_threads = 0):
- # This routine will iterate over all of the vectors and cast each in
- # turn. Might benefit from a more sophisticated intersection check,
- # like http://courses.csusm.edu/cs697exz/ray_box.htm
- cdef int vi, vj, hit, i, j
- cdef np.int64_t iter[4]
- cdef VolumeContainer *vc = pg.container
- cdef ImageContainer *im = self.image
- self.setup(pg)
- if self.sampler == NULL: raise RuntimeError
- cdef np.float64_t *v_pos
- cdef np.float64_t *v_dir
- cdef np.float64_t max_t
- hit = 0
- cdef np.int64_t nx, ny, size
- self.extent_function(self.image, vc, iter)
- iter[0] = i64clip(iter[0]-1, 0, im.nv[0])
- iter[1] = i64clip(iter[1]+1, 0, im.nv[0])
- iter[2] = i64clip(iter[2]-1, 0, im.nv[1])
- iter[3] = i64clip(iter[3]+1, 0, im.nv[1])
- nx = (iter[1] - iter[0])
- ny = (iter[3] - iter[2])
- size = nx * ny
- cdef ImageAccumulator *idata
- cdef np.float64_t width[3]
- cdef int chunksize = 100
- for i in range(3):
- width[i] = self.width[i]
- with nogil, parallel(num_threads = num_threads):
- idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
- idata.supp_data = self.supp_data
- v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
- v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
- for j in prange(size, schedule="static", chunksize=chunksize):
- vj = j % ny
- vi = (j - vj) / ny + iter[0]
- vj = vj + iter[2]
- # Dynamically calculate the position
- self.vector_function(im, vi, vj, width, v_dir, v_pos)
- for i in range(Nch):
- idata.rgba[i] = im.image[vi, vj, i]
- max_t = fclip(im.zbuffer[vi, vj], 0.0, 1.0)
- walk_volume(vc, v_pos, v_dir, self.sampler,
- (<void *> idata), NULL, max_t)
- if (j % (10*chunksize)) == 0:
- with gil:
- PyErr_CheckSignals()
- for i in range(Nch):
- im.image[vi, vj, i] = idata.rgba[i]
- free(idata)
- free(v_pos)
- free(v_dir)
- return hit
-
- cdef void setup(self, PartitionedGrid pg):
- return
-
- def __dealloc__(self):
- self.image.image = None
- self.image.vp_pos = None
- self.image.vp_dir = None
- self.image.zbuffer = None
- self.image.camera_data = None
- self.image.image_used = None
- free(self.image)
-
-
-cdef void projection_sampler(
- VolumeContainer *vc,
- np.float64_t v_pos[3],
- np.float64_t v_dir[3],
- np.float64_t enter_t,
- np.float64_t exit_t,
- int index[3],
- void *data) nogil:
- cdef ImageAccumulator *im = <ImageAccumulator *> data
- cdef int i
- cdef np.float64_t dl = (exit_t - enter_t)
- cdef int di = (index[0]*vc.dims[1]+index[1])*vc.dims[2]+index[2]
- for i in range(imin(4, vc.n_fields)):
- im.rgba[i] += vc.data[i][di] * dl
-
-cdef struct VolumeRenderAccumulator:
- int n_fits
- int n_samples
- FieldInterpolationTable *fits
- int field_table_ids[6]
- np.float64_t star_coeff
- np.float64_t star_er
- np.float64_t star_sigma_num
- kdtree_utils.kdtree *star_list
- np.float64_t *light_dir
- np.float64_t *light_rgba
- int grey_opacity
-
-
-cdef class ProjectionSampler(ImageSampler):
- cdef void setup(self, PartitionedGrid pg):
- self.sampler = projection_sampler
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef void interpolated_projection_sampler(
- VolumeContainer *vc,
- np.float64_t v_pos[3],
- np.float64_t v_dir[3],
- np.float64_t enter_t,
- np.float64_t exit_t,
- int index[3],
- void *data) nogil:
- cdef ImageAccumulator *im = <ImageAccumulator *> data
- cdef VolumeRenderAccumulator *vri = <VolumeRenderAccumulator *> \
- im.supp_data
- # we assume this has vertex-centered data.
- cdef int offset = index[0] * (vc.dims[1] + 1) * (vc.dims[2] + 1) \
- + index[1] * (vc.dims[2] + 1) + index[2]
- cdef np.float64_t dp[3]
- cdef np.float64_t ds[3]
- cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
- cdef np.float64_t dvs[6]
- for i in range(3):
- dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
- dp[i] -= index[i] * vc.dds[i] + vc.left_edge[i]
- dp[i] *= vc.idds[i]
- ds[i] = v_dir[i] * vc.idds[i] * dt
- for i in range(vri.n_samples):
- for j in range(vc.n_fields):
- dvs[j] = offset_interpolate(vc.dims, dp,
- vc.data[j] + offset)
- for j in range(imin(3, vc.n_fields)):
- im.rgba[j] += dvs[j] * dt
- for j in range(3):
- dp[j] += ds[j]
-
-cdef class InterpolatedProjectionSampler(ImageSampler):
- cdef VolumeRenderAccumulator *vra
- cdef public object tf_obj
- cdef public object my_field_tables
- def __cinit__(self,
- np.ndarray vp_pos,
- np.ndarray vp_dir,
- np.ndarray[np.float64_t, ndim=1] center,
- bounds,
- np.ndarray[np.float64_t, ndim=3] image,
- np.ndarray[np.float64_t, ndim=1] x_vec,
- np.ndarray[np.float64_t, ndim=1] y_vec,
- np.ndarray[np.float64_t, ndim=1] width,
- n_samples = 10, **kwargs):
- ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
- x_vec, y_vec, width, **kwargs)
- # Now we handle tf_obj
- self.vra = <VolumeRenderAccumulator *> \
- malloc(sizeof(VolumeRenderAccumulator))
- self.vra.n_samples = n_samples
- self.supp_data = <void *> self.vra
-
- cdef void setup(self, PartitionedGrid pg):
- self.sampler = interpolated_projection_sampler
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef void volume_render_sampler(
- VolumeContainer *vc,
- np.float64_t v_pos[3],
- np.float64_t v_dir[3],
- np.float64_t enter_t,
- np.float64_t exit_t,
- int index[3],
- void *data) nogil:
- cdef ImageAccumulator *im = <ImageAccumulator *> data
- cdef VolumeRenderAccumulator *vri = <VolumeRenderAccumulator *> \
- im.supp_data
- # we assume this has vertex-centered data.
- cdef int offset = index[0] * (vc.dims[1] + 1) * (vc.dims[2] + 1) \
- + index[1] * (vc.dims[2] + 1) + index[2]
- cdef int cell_offset = index[0] * (vc.dims[1]) * (vc.dims[2]) \
- + index[1] * (vc.dims[2]) + index[2]
- if vc.mask[cell_offset] != 1:
- return
- cdef np.float64_t dp[3]
- cdef np.float64_t ds[3]
- cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
- cdef np.float64_t dvs[6]
- for i in range(3):
- dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
- dp[i] -= index[i] * vc.dds[i] + vc.left_edge[i]
- dp[i] *= vc.idds[i]
- ds[i] = v_dir[i] * vc.idds[i] * dt
- for i in range(vri.n_samples):
- for j in range(vc.n_fields):
- dvs[j] = offset_interpolate(vc.dims, dp,
- vc.data[j] + offset)
- FIT_eval_transfer(dt, dvs, im.rgba, vri.n_fits,
- vri.fits, vri.field_table_ids, vri.grey_opacity)
- for j in range(3):
- dp[j] += ds[j]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef void volume_render_gradient_sampler(
- VolumeContainer *vc,
- np.float64_t v_pos[3],
- np.float64_t v_dir[3],
- np.float64_t enter_t,
- np.float64_t exit_t,
- int index[3],
- void *data) nogil:
- cdef ImageAccumulator *im = <ImageAccumulator *> data
- cdef VolumeRenderAccumulator *vri = <VolumeRenderAccumulator *> \
- im.supp_data
- # we assume this has vertex-centered data.
- cdef int offset = index[0] * (vc.dims[1] + 1) * (vc.dims[2] + 1) \
- + index[1] * (vc.dims[2] + 1) + index[2]
- cdef np.float64_t dp[3]
- cdef np.float64_t ds[3]
- cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
- cdef np.float64_t dvs[6]
- cdef np.float64_t *grad
- grad = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
- for i in range(3):
- dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
- dp[i] -= index[i] * vc.dds[i] + vc.left_edge[i]
- dp[i] *= vc.idds[i]
- ds[i] = v_dir[i] * vc.idds[i] * dt
- for i in range(vri.n_samples):
- for j in range(vc.n_fields):
- dvs[j] = offset_interpolate(vc.dims, dp,
- vc.data[j] + offset)
- eval_gradient(vc.dims, dp, vc.data[0] + offset, grad)
- FIT_eval_transfer_with_light(dt, dvs, grad,
- vri.light_dir, vri.light_rgba,
- im.rgba, vri.n_fits,
- vri.fits, vri.field_table_ids, vri.grey_opacity)
- for j in range(3):
- dp[j] += ds[j]
- free(grad)
-
-cdef class star_kdtree_container:
- cdef kdtree_utils.kdtree *tree
- cdef public np.float64_t sigma
- cdef public np.float64_t coeff
-
- def __init__(self):
- self.tree = kdtree_utils.kd_create(3)
-
- def add_points(self,
- np.ndarray[np.float64_t, ndim=1] pos_x,
- np.ndarray[np.float64_t, ndim=1] pos_y,
- np.ndarray[np.float64_t, ndim=1] pos_z,
- np.ndarray[np.float64_t, ndim=2] star_colors):
- cdef int i
- cdef np.float64_t *pointer = <np.float64_t *> star_colors.data
- for i in range(pos_x.shape[0]):
- kdtree_utils.kd_insert3(self.tree,
- pos_x[i], pos_y[i], pos_z[i], <void *> (pointer + i*3))
-
- def __dealloc__(self):
- kdtree_utils.kd_free(self.tree)
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef void volume_render_stars_sampler(
- VolumeContainer *vc,
- np.float64_t v_pos[3],
- np.float64_t v_dir[3],
- np.float64_t enter_t,
- np.float64_t exit_t,
- int index[3],
- void *data) nogil:
- cdef ImageAccumulator *im = <ImageAccumulator *> data
- cdef VolumeRenderAccumulator *vri = <VolumeRenderAccumulator *> \
- im.supp_data
- cdef kdtree_utils.kdres *ballq = NULL
- # we assume this has vertex-centered data.
- cdef int offset = index[0] * (vc.dims[1] + 1) * (vc.dims[2] + 1) \
- + index[1] * (vc.dims[2] + 1) + index[2]
- cdef np.float64_t slopes[6]
- cdef np.float64_t dp[3]
- cdef np.float64_t ds[3]
- cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
- cdef np.float64_t dvs[6]
- cdef np.float64_t cell_left[3]
- cdef np.float64_t local_dds[3]
- cdef np.float64_t pos[3]
- cdef int nstars, i, j
- cdef np.float64_t *colors = NULL
- cdef np.float64_t gexp, gaussian, px, py, pz
- px = py = pz = -1
- for i in range(3):
- dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
- dp[i] -= index[i] * vc.dds[i] + vc.left_edge[i]
- dp[i] *= vc.idds[i]
- ds[i] = v_dir[i] * vc.idds[i] * dt
- for i in range(vc.n_fields):
- slopes[i] = offset_interpolate(vc.dims, dp,
- vc.data[i] + offset)
- cdef np.float64_t temp
- # Now we get the ball-tree result for the stars near our cell center.
- for i in range(3):
- cell_left[i] = index[i] * vc.dds[i] + vc.left_edge[i]
- pos[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
- local_dds[i] = v_dir[i] * dt
- ballq = kdtree_utils.kd_nearest_range3(
- vri.star_list, cell_left[0] + vc.dds[0]*0.5,
- cell_left[1] + vc.dds[1]*0.5,
- cell_left[2] + vc.dds[2]*0.5,
- vri.star_er + 0.9*vc.dds[0])
- # ~0.866 + a bit
-
- nstars = kdtree_utils.kd_res_size(ballq)
- for i in range(vc.n_fields):
- temp = slopes[i]
- slopes[i] -= offset_interpolate(vc.dims, dp,
- vc.data[i] + offset)
- slopes[i] *= -1.0/vri.n_samples
- dvs[i] = temp
- for _ in range(vri.n_samples):
- # Now we add the contribution from stars
- kdtree_utils.kd_res_rewind(ballq)
- for i in range(nstars):
- kdtree_utils.kd_res_item3(ballq, &px, &py, &pz)
- colors = <np.float64_t *> kdtree_utils.kd_res_item_data(ballq)
- kdtree_utils.kd_res_next(ballq)
- gexp = (px - pos[0])*(px - pos[0]) \
- + (py - pos[1])*(py - pos[1]) \
- + (pz - pos[2])*(pz - pos[2])
- gaussian = vri.star_coeff * exp(-gexp/vri.star_sigma_num)
- for j in range(3): im.rgba[j] += gaussian*dt*colors[j]
- for i in range(3):
- pos[i] += local_dds[i]
- FIT_eval_transfer(dt, dvs, im.rgba, vri.n_fits, vri.fits,
- vri.field_table_ids, vri.grey_opacity)
- for i in range(vc.n_fields):
- dvs[i] += slopes[i]
- kdtree_utils.kd_res_free(ballq)
-
-cdef class VolumeRenderSampler(ImageSampler):
- cdef VolumeRenderAccumulator *vra
- cdef public object tf_obj
- cdef public object my_field_tables
- cdef kdtree_utils.kdtree **trees
- cdef object tree_containers
- def __cinit__(self,
- np.ndarray vp_pos,
- np.ndarray vp_dir,
- np.ndarray[np.float64_t, ndim=1] center,
- bounds,
- np.ndarray[np.float64_t, ndim=3] image,
- np.ndarray[np.float64_t, ndim=1] x_vec,
- np.ndarray[np.float64_t, ndim=1] y_vec,
- np.ndarray[np.float64_t, ndim=1] width,
- tf_obj, n_samples = 10,
- star_list = None, **kwargs):
- ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
- x_vec, y_vec, width, **kwargs)
- cdef int i
- cdef np.ndarray[np.float64_t, ndim=1] temp
- # Now we handle tf_obj
- self.vra = <VolumeRenderAccumulator *> \
- malloc(sizeof(VolumeRenderAccumulator))
- self.vra.fits = <FieldInterpolationTable *> \
- malloc(sizeof(FieldInterpolationTable) * 6)
- self.vra.n_fits = tf_obj.n_field_tables
- assert(self.vra.n_fits <= 6)
- self.vra.grey_opacity = getattr(tf_obj, "grey_opacity", 0)
- self.vra.n_samples = n_samples
- self.my_field_tables = []
- for i in range(self.vra.n_fits):
- temp = tf_obj.tables[i].y
- FIT_initialize_table(&self.vra.fits[i],
- temp.shape[0],
- <np.float64_t *> temp.data,
- tf_obj.tables[i].x_bounds[0],
- tf_obj.tables[i].x_bounds[1],
- tf_obj.field_ids[i], tf_obj.weight_field_ids[i],
- tf_obj.weight_table_ids[i])
- self.my_field_tables.append((tf_obj.tables[i],
- tf_obj.tables[i].y))
- for i in range(6):
- self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
- self.supp_data = <void *> self.vra
- cdef star_kdtree_container skdc
- self.tree_containers = star_list
- if star_list is None:
- self.trees = NULL
- else:
- self.trees = <kdtree_utils.kdtree **> malloc(
- sizeof(kdtree_utils.kdtree*) * len(star_list))
- for i in range(len(star_list)):
- skdc = star_list[i]
- self.trees[i] = skdc.tree
-
- cdef void setup(self, PartitionedGrid pg):
- cdef star_kdtree_container star_tree
- if self.trees == NULL:
- self.sampler = volume_render_sampler
- else:
- star_tree = self.tree_containers[pg.parent_grid_id]
- self.vra.star_list = self.trees[pg.parent_grid_id]
- self.vra.star_sigma_num = 2.0*star_tree.sigma**2.0
- self.vra.star_er = 2.326 * star_tree.sigma
- self.vra.star_coeff = star_tree.coeff
- self.sampler = volume_render_stars_sampler
-
- def __dealloc__(self):
- for i in range(self.vra.n_fits):
- free(self.vra.fits[i].d0)
- free(self.vra.fits[i].dy)
- free(self.vra.fits)
- free(self.vra)
-
-cdef class LightSourceRenderSampler(ImageSampler):
- cdef VolumeRenderAccumulator *vra
- cdef public object tf_obj
- cdef public object my_field_tables
- def __cinit__(self,
- np.ndarray vp_pos,
- np.ndarray vp_dir,
- np.ndarray[np.float64_t, ndim=1] center,
- bounds,
- np.ndarray[np.float64_t, ndim=3] image,
- np.ndarray[np.float64_t, ndim=1] x_vec,
- np.ndarray[np.float64_t, ndim=1] y_vec,
- np.ndarray[np.float64_t, ndim=1] width,
- tf_obj, n_samples = 10,
- light_dir=[1.,1.,1.],
- light_rgba=[1.,1.,1.,1.],
- **kwargs):
- ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
- x_vec, y_vec, width, **kwargs)
- cdef int i
- cdef np.ndarray[np.float64_t, ndim=1] temp
- # Now we handle tf_obj
- self.vra = <VolumeRenderAccumulator *> \
- malloc(sizeof(VolumeRenderAccumulator))
- self.vra.fits = <FieldInterpolationTable *> \
- malloc(sizeof(FieldInterpolationTable) * 6)
- self.vra.n_fits = tf_obj.n_field_tables
- assert(self.vra.n_fits <= 6)
- self.vra.grey_opacity = getattr(tf_obj, "grey_opacity", 0)
- self.vra.n_samples = n_samples
- self.vra.light_dir = <np.float64_t *> malloc(sizeof(np.float64_t) * 3)
- self.vra.light_rgba = <np.float64_t *> malloc(sizeof(np.float64_t) * 4)
- light_dir /= np.sqrt(light_dir[0]**2 + light_dir[1]**2 + light_dir[2]**2)
- for i in range(3):
- self.vra.light_dir[i] = light_dir[i]
- for i in range(4):
- self.vra.light_rgba[i] = light_rgba[i]
- self.my_field_tables = []
- for i in range(self.vra.n_fits):
- temp = tf_obj.tables[i].y
- FIT_initialize_table(&self.vra.fits[i],
- temp.shape[0],
- <np.float64_t *> temp.data,
- tf_obj.tables[i].x_bounds[0],
- tf_obj.tables[i].x_bounds[1],
- tf_obj.field_ids[i], tf_obj.weight_field_ids[i],
- tf_obj.weight_table_ids[i])
- self.my_field_tables.append((tf_obj.tables[i],
- tf_obj.tables[i].y))
- for i in range(6):
- self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
- self.supp_data = <void *> self.vra
-
- cdef void setup(self, PartitionedGrid pg):
- self.sampler = volume_render_gradient_sampler
-
- def __dealloc__(self):
- for i in range(self.vra.n_fits):
- free(self.vra.fits[i].d0)
- free(self.vra.fits[i].dy)
- free(self.vra.light_dir)
- free(self.vra.light_rgba)
- free(self.vra.fits)
- free(self.vra)
-
-
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
cdef int walk_volume(VolumeContainer *vc,
np.float64_t v_pos[3],
np.float64_t v_dir[3],
- sampler_function *sampler,
+ sampler_function *sample,
void *data,
np.float64_t *return_t = NULL,
np.float64_t max_t = 1.0) nogil:
@@ -1031,7 +139,7 @@
else:
i = 2
exit_t = fmin(tmax[i], max_t)
- sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
+ sample(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
cur_ind[i] += step[i]
enter_t = tmax[i]
tmax[i] += tdelta[i]
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b yt/utilities/lib/image_samplers.pxd
--- /dev/null
+++ b/yt/utilities/lib/image_samplers.pxd
@@ -0,0 +1,78 @@
+"""
+Definitions for image samplers
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+cimport cython
+cimport kdtree_utils
+from .volume_container cimport VolumeContainer
+from .lenses cimport \
+ calculate_extent_function, \
+ generate_vector_info_function, \
+ ImageContainer
+from .partitioned_grid cimport PartitionedGrid
+
+DEF Nch = 4
+
+# NOTE: We don't want to import the field_interpolator_tables here, as it
+# breaks a bunch of C++ interop. Maybe some day it won't. So, we just forward
+# declare.
+cdef struct VolumeRenderAccumulator
+
+cdef struct ImageAccumulator:
+ np.float64_t rgba[Nch]
+ void *supp_data
+
+cdef class ImageSampler:
+ cdef ImageContainer *image
+ cdef public object acenter, aimage, ax_vec, ay_vec
+ cdef public object azbuffer
+ cdef public object aimage_used
+ cdef public object amesh_lines
+ cdef void *supp_data
+ cdef np.float64_t width[3]
+ cdef public object lens_type
+ cdef calculate_extent_function *extent_function
+ cdef generate_vector_info_function *vector_function
+ cdef void setup(self, PartitionedGrid pg)
+ @staticmethod
+ cdef void sample(VolumeContainer *vc,
+ np.float64_t v_pos[3],
+ np.float64_t v_dir[3],
+ np.float64_t enter_t,
+ np.float64_t exit_t,
+ int index[3],
+ void *data) nogil
+
+cdef class ProjectionSampler(ImageSampler):
+ pass
+
+cdef class InterpolatedProjectionSampler(ImageSampler):
+ cdef VolumeRenderAccumulator *vra
+ cdef public object tf_obj
+ cdef public object my_field_tables
+
+cdef class VolumeRenderSampler(ImageSampler):
+ cdef VolumeRenderAccumulator *vra
+ cdef public object tf_obj
+ cdef public object my_field_tables
+ cdef kdtree_utils.kdtree **trees
+ cdef object tree_containers
+
+cdef class LightSourceRenderSampler(ImageSampler):
+ cdef VolumeRenderAccumulator *vra
+ cdef public object tf_obj
+ cdef public object my_field_tables
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b yt/utilities/lib/image_samplers.pyx
--- /dev/null
+++ b/yt/utilities/lib/image_samplers.pyx
@@ -0,0 +1,486 @@
+"""
+Image sampler definitions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+cimport cython
+from libc.stdlib cimport malloc, calloc, free, abs
+from libc.math cimport exp, floor, log2, \
+ fabs, atan, atan2, asin, cos, sin, sqrt, acos, M_PI
+from yt.utilities.lib.fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
+from field_interpolation_tables cimport \
+ FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\
+ FIT_eval_transfer_with_light
+cimport lenses
+from .grid_traversal cimport walk_volume
+from .fixed_interpolator cimport \
+ offset_interpolate, \
+ fast_interpolate, \
+ trilinear_interpolate, \
+ eval_gradient, \
+ offset_fill, \
+ vertex_interp
+
+cdef extern from "platform_dep.h":
+ long int lrint(double x) nogil
+
+DEF Nch = 4
+
+from cython.parallel import prange, parallel, threadid
+from vec3_ops cimport dot, subtract, L2_norm, fma
+
+from cpython.exc cimport PyErr_CheckSignals
+
+cdef struct VolumeRenderAccumulator:
+ int n_fits
+ int n_samples
+ FieldInterpolationTable *fits
+ int field_table_ids[6]
+ np.float64_t star_coeff
+ np.float64_t star_er
+ np.float64_t star_sigma_num
+ kdtree_utils.kdtree *star_list
+ np.float64_t *light_dir
+ np.float64_t *light_rgba
+ int grey_opacity
+
+
+cdef class ImageSampler:
+ def __init__(self,
+ np.float64_t[:,:,:] vp_pos,
+ np.float64_t[:,:,:] vp_dir,
+ np.ndarray[np.float64_t, ndim=1] center,
+ bounds,
+ np.ndarray[np.float64_t, ndim=3] image,
+ np.ndarray[np.float64_t, ndim=1] x_vec,
+ np.ndarray[np.float64_t, ndim=1] y_vec,
+ np.ndarray[np.float64_t, ndim=1] width,
+ *args, **kwargs):
+ self.image = <ImageContainer *> calloc(sizeof(ImageContainer), 1)
+ cdef np.float64_t[:,:] zbuffer
+ cdef np.int64_t[:,:] image_used
+ cdef np.int64_t[:,:] mesh_lines
+ cdef np.float64_t[:,:] camera_data
+ cdef int i
+
+ camera_data = kwargs.pop("camera_data", None)
+ if camera_data is not None:
+ self.image.camera_data = camera_data
+
+ zbuffer = kwargs.pop("zbuffer", None)
+ if zbuffer is None:
+ zbuffer = np.ones((image.shape[0], image.shape[1]), "float64")
+
+ image_used = np.zeros((image.shape[0], image.shape[1]), "int64")
+ mesh_lines = np.zeros((image.shape[0], image.shape[1]), "int64")
+
+ self.lens_type = kwargs.pop("lens_type", None)
+ if self.lens_type == "plane-parallel":
+ self.extent_function = lenses.calculate_extent_plane_parallel
+ self.vector_function = lenses.generate_vector_info_plane_parallel
+ else:
+ if not (vp_pos.shape[0] == vp_dir.shape[0] == image.shape[0]) or \
+ not (vp_pos.shape[1] == vp_dir.shape[1] == image.shape[1]):
+ msg = "Bad lens shape / direction for %s\n" % (self.lens_type)
+ msg += "Shapes: (%s - %s - %s) and (%s - %s - %s)" % (
+ vp_pos.shape[0], vp_dir.shape[0], image.shape[0],
+ vp_pos.shape[1], vp_dir.shape[1], image.shape[1])
+ raise RuntimeError(msg)
+
+ if camera_data is not None and self.lens_type == 'perspective':
+ self.extent_function = lenses.calculate_extent_perspective
+ else:
+ self.extent_function = lenses.calculate_extent_null
+ self.\
+ vector_function = lenses.generate_vector_info_null
+
+ # These assignments are so we can track the objects and prevent their
+ # de-allocation from reference counts. Note that we do this to the
+ # "atleast_3d" versions. Also, note that we re-assign the input
+ # arguments.
+ self.image.vp_pos = vp_pos
+ self.image.vp_dir = vp_dir
+ self.image.image = self.aimage = image
+ self.acenter = center
+ self.image.center = <np.float64_t *> center.data
+ self.ax_vec = x_vec
+ self.image.x_vec = <np.float64_t *> x_vec.data
+ self.ay_vec = y_vec
+ self.image.y_vec = <np.float64_t *> y_vec.data
+ self.image.zbuffer = self.azbuffer = zbuffer
+ self.image.image_used = self.aimage_used = image_used
+ self.image.mesh_lines = self.amesh_lines = mesh_lines
+ self.image.nv[0] = image.shape[0]
+ self.image.nv[1] = image.shape[1]
+ for i in range(4): self.image.bounds[i] = bounds[i]
+ self.image.pdx = (bounds[1] - bounds[0])/self.image.nv[0]
+ self.image.pdy = (bounds[3] - bounds[2])/self.image.nv[1]
+ for i in range(3):
+ self.width[i] = width[i]
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ @cython.cdivision(True)
+ def __call__(self, PartitionedGrid pg, int num_threads = 0):
+ # This routine will iterate over all of the vectors and cast each in
+ # turn. Might benefit from a more sophisticated intersection check,
+ # like http://courses.csusm.edu/cs697exz/ray_box.htm
+ cdef int vi, vj, hit, i, j
+ cdef np.int64_t iter[4]
+ cdef VolumeContainer *vc = pg.container
+ cdef ImageContainer *im = self.image
+ self.setup(pg)
+ cdef np.float64_t *v_pos
+ cdef np.float64_t *v_dir
+ cdef np.float64_t max_t
+ hit = 0
+ cdef np.int64_t nx, ny, size
+ self.extent_function(self.image, vc, iter)
+ iter[0] = i64clip(iter[0]-1, 0, im.nv[0])
+ iter[1] = i64clip(iter[1]+1, 0, im.nv[0])
+ iter[2] = i64clip(iter[2]-1, 0, im.nv[1])
+ iter[3] = i64clip(iter[3]+1, 0, im.nv[1])
+ nx = (iter[1] - iter[0])
+ ny = (iter[3] - iter[2])
+ size = nx * ny
+ cdef ImageAccumulator *idata
+ cdef np.float64_t width[3]
+ cdef int chunksize = 100
+ for i in range(3):
+ width[i] = self.width[i]
+ with nogil, parallel(num_threads = num_threads):
+ idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
+ idata.supp_data = self.supp_data
+ v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+ v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+ for j in prange(size, schedule="static", chunksize=chunksize):
+ vj = j % ny
+ vi = (j - vj) / ny + iter[0]
+ vj = vj + iter[2]
+ # Dynamically calculate the position
+ self.vector_function(im, vi, vj, width, v_dir, v_pos)
+ for i in range(Nch):
+ idata.rgba[i] = im.image[vi, vj, i]
+ max_t = fclip(im.zbuffer[vi, vj], 0.0, 1.0)
+ walk_volume(vc, v_pos, v_dir, self.sample,
+ (<void *> idata), NULL, max_t)
+ if (j % (10*chunksize)) == 0:
+ with gil:
+ PyErr_CheckSignals()
+ for i in range(Nch):
+ im.image[vi, vj, i] = idata.rgba[i]
+ free(idata)
+ free(v_pos)
+ free(v_dir)
+ return hit
+
+ cdef void setup(self, PartitionedGrid pg):
+ return
+
+ @staticmethod
+ cdef void sample(
+ VolumeContainer *vc,
+ np.float64_t v_pos[3],
+ np.float64_t v_dir[3],
+ np.float64_t enter_t,
+ np.float64_t exit_t,
+ int index[3],
+ void *data) nogil:
+ return
+
+ def ensure_code_unit_params(self, params):
+ for param_name in ['center', 'vp_pos', 'vp_dir', 'width']:
+ param = params[param_name]
+ if hasattr(param, 'in_units'):
+ params[param_name] = param.in_units('code_length')
+ bounds = params['bounds']
+ if hasattr(bounds[0], 'units'):
+ params['bounds'] = tuple(b.in_units('code_length').d for b in bounds)
+
+ return params
+
+ def __dealloc__(self):
+ self.image.image = None
+ self.image.vp_pos = None
+ self.image.vp_dir = None
+ self.image.zbuffer = None
+ self.image.camera_data = None
+ self.image.image_used = None
+ free(self.image)
+
+cdef class ProjectionSampler(ImageSampler):
+
+ @staticmethod
+ cdef void sample(
+ VolumeContainer *vc,
+ np.float64_t v_pos[3],
+ np.float64_t v_dir[3],
+ np.float64_t enter_t,
+ np.float64_t exit_t,
+ int index[3],
+ void *data) nogil:
+ cdef ImageAccumulator *im = <ImageAccumulator *> data
+ cdef int i
+ cdef np.float64_t dl = (exit_t - enter_t)
+ cdef int di = (index[0]*vc.dims[1]+index[1])*vc.dims[2]+index[2]
+ for i in range(imin(4, vc.n_fields)):
+ im.rgba[i] += vc.data[i][di] * dl
+
+
+cdef class InterpolatedProjectionSampler(ImageSampler):
+ def __cinit__(self,
+ np.ndarray vp_pos,
+ np.ndarray vp_dir,
+ np.ndarray[np.float64_t, ndim=1] center,
+ bounds,
+ np.ndarray[np.float64_t, ndim=3] image,
+ np.ndarray[np.float64_t, ndim=1] x_vec,
+ np.ndarray[np.float64_t, ndim=1] y_vec,
+ np.ndarray[np.float64_t, ndim=1] width,
+ n_samples = 10, **kwargs):
+ ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
+ x_vec, y_vec, width, **kwargs)
+ # Now we handle tf_obj
+ self.vra = <VolumeRenderAccumulator *> \
+ malloc(sizeof(VolumeRenderAccumulator))
+ self.vra.n_samples = n_samples
+ self.supp_data = <void *> self.vra
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ @cython.cdivision(True)
+ @staticmethod
+ cdef void sample(
+ VolumeContainer *vc,
+ np.float64_t v_pos[3],
+ np.float64_t v_dir[3],
+ np.float64_t enter_t,
+ np.float64_t exit_t,
+ int index[3],
+ void *data) nogil:
+ cdef ImageAccumulator *im = <ImageAccumulator *> data
+ cdef VolumeRenderAccumulator *vri = <VolumeRenderAccumulator *> \
+ im.supp_data
+ # we assume this has vertex-centered data.
+ cdef int offset = index[0] * (vc.dims[1] + 1) * (vc.dims[2] + 1) \
+ + index[1] * (vc.dims[2] + 1) + index[2]
+ cdef np.float64_t dp[3]
+ cdef np.float64_t ds[3]
+ cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
+ cdef np.float64_t dvs[6]
+ for i in range(3):
+ dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+ dp[i] -= index[i] * vc.dds[i] + vc.left_edge[i]
+ dp[i] *= vc.idds[i]
+ ds[i] = v_dir[i] * vc.idds[i] * dt
+ for i in range(vri.n_samples):
+ for j in range(vc.n_fields):
+ dvs[j] = offset_interpolate(vc.dims, dp,
+ vc.data[j] + offset)
+ for j in range(imin(3, vc.n_fields)):
+ im.rgba[j] += dvs[j] * dt
+ for j in range(3):
+ dp[j] += ds[j]
+
+
+cdef class VolumeRenderSampler(ImageSampler):
+ def __cinit__(self,
+ np.ndarray vp_pos,
+ np.ndarray vp_dir,
+ np.ndarray[np.float64_t, ndim=1] center,
+ bounds,
+ np.ndarray[np.float64_t, ndim=3] image,
+ np.ndarray[np.float64_t, ndim=1] x_vec,
+ np.ndarray[np.float64_t, ndim=1] y_vec,
+ np.ndarray[np.float64_t, ndim=1] width,
+ tf_obj, n_samples = 10,
+ star_list = None, **kwargs):
+ ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
+ x_vec, y_vec, width, **kwargs)
+ cdef int i
+ cdef np.ndarray[np.float64_t, ndim=1] temp
+ # Now we handle tf_obj
+ self.vra = <VolumeRenderAccumulator *> \
+ malloc(sizeof(VolumeRenderAccumulator))
+ self.vra.fits = <FieldInterpolationTable *> \
+ malloc(sizeof(FieldInterpolationTable) * 6)
+ self.vra.n_fits = tf_obj.n_field_tables
+ assert(self.vra.n_fits <= 6)
+ self.vra.grey_opacity = getattr(tf_obj, "grey_opacity", 0)
+ self.vra.n_samples = n_samples
+ self.my_field_tables = []
+ for i in range(self.vra.n_fits):
+ temp = tf_obj.tables[i].y
+ FIT_initialize_table(&self.vra.fits[i],
+ temp.shape[0],
+ <np.float64_t *> temp.data,
+ tf_obj.tables[i].x_bounds[0],
+ tf_obj.tables[i].x_bounds[1],
+ tf_obj.field_ids[i], tf_obj.weight_field_ids[i],
+ tf_obj.weight_table_ids[i])
+ self.my_field_tables.append((tf_obj.tables[i],
+ tf_obj.tables[i].y))
+ for i in range(6):
+ self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
+ self.supp_data = <void *> self.vra
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ @cython.cdivision(True)
+ @staticmethod
+ cdef void sample(
+ VolumeContainer *vc,
+ np.float64_t v_pos[3],
+ np.float64_t v_dir[3],
+ np.float64_t enter_t,
+ np.float64_t exit_t,
+ int index[3],
+ void *data) nogil:
+ cdef ImageAccumulator *im = <ImageAccumulator *> data
+ cdef VolumeRenderAccumulator *vri = <VolumeRenderAccumulator *> \
+ im.supp_data
+ # we assume this has vertex-centered data.
+ cdef int offset = index[0] * (vc.dims[1] + 1) * (vc.dims[2] + 1) \
+ + index[1] * (vc.dims[2] + 1) + index[2]
+ cdef int cell_offset = index[0] * (vc.dims[1]) * (vc.dims[2]) \
+ + index[1] * (vc.dims[2]) + index[2]
+ if vc.mask[cell_offset] != 1:
+ return
+ cdef np.float64_t dp[3]
+ cdef np.float64_t ds[3]
+ cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
+ cdef np.float64_t dvs[6]
+ for i in range(3):
+ dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+ dp[i] -= index[i] * vc.dds[i] + vc.left_edge[i]
+ dp[i] *= vc.idds[i]
+ ds[i] = v_dir[i] * vc.idds[i] * dt
+ for i in range(vri.n_samples):
+ for j in range(vc.n_fields):
+ dvs[j] = offset_interpolate(vc.dims, dp,
+ vc.data[j] + offset)
+ FIT_eval_transfer(dt, dvs, im.rgba, vri.n_fits,
+ vri.fits, vri.field_table_ids, vri.grey_opacity)
+ for j in range(3):
+ dp[j] += ds[j]
+
+ def __dealloc__(self):
+ for i in range(self.vra.n_fits):
+ free(self.vra.fits[i].d0)
+ free(self.vra.fits[i].dy)
+ free(self.vra.fits)
+ free(self.vra)
+
+cdef class LightSourceRenderSampler(ImageSampler):
+ def __cinit__(self,
+ np.ndarray vp_pos,
+ np.ndarray vp_dir,
+ np.ndarray[np.float64_t, ndim=1] center,
+ bounds,
+ np.ndarray[np.float64_t, ndim=3] image,
+ np.ndarray[np.float64_t, ndim=1] x_vec,
+ np.ndarray[np.float64_t, ndim=1] y_vec,
+ np.ndarray[np.float64_t, ndim=1] width,
+ tf_obj, n_samples = 10,
+ light_dir=[1.,1.,1.],
+ light_rgba=[1.,1.,1.,1.],
+ **kwargs):
+ ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
+ x_vec, y_vec, width, **kwargs)
+ cdef int i
+ cdef np.ndarray[np.float64_t, ndim=1] temp
+ # Now we handle tf_obj
+ self.vra = <VolumeRenderAccumulator *> \
+ malloc(sizeof(VolumeRenderAccumulator))
+ self.vra.fits = <FieldInterpolationTable *> \
+ malloc(sizeof(FieldInterpolationTable) * 6)
+ self.vra.n_fits = tf_obj.n_field_tables
+ assert(self.vra.n_fits <= 6)
+ self.vra.grey_opacity = getattr(tf_obj, "grey_opacity", 0)
+ self.vra.n_samples = n_samples
+ self.vra.light_dir = <np.float64_t *> malloc(sizeof(np.float64_t) * 3)
+ self.vra.light_rgba = <np.float64_t *> malloc(sizeof(np.float64_t) * 4)
+ light_dir /= np.sqrt(light_dir[0]**2 + light_dir[1]**2 + light_dir[2]**2)
+ for i in range(3):
+ self.vra.light_dir[i] = light_dir[i]
+ for i in range(4):
+ self.vra.light_rgba[i] = light_rgba[i]
+ self.my_field_tables = []
+ for i in range(self.vra.n_fits):
+ temp = tf_obj.tables[i].y
+ FIT_initialize_table(&self.vra.fits[i],
+ temp.shape[0],
+ <np.float64_t *> temp.data,
+ tf_obj.tables[i].x_bounds[0],
+ tf_obj.tables[i].x_bounds[1],
+ tf_obj.field_ids[i], tf_obj.weight_field_ids[i],
+ tf_obj.weight_table_ids[i])
+ self.my_field_tables.append((tf_obj.tables[i],
+ tf_obj.tables[i].y))
+ for i in range(6):
+ self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
+ self.supp_data = <void *> self.vra
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ @cython.cdivision(True)
+ @staticmethod
+ cdef void sample(
+ VolumeContainer *vc,
+ np.float64_t v_pos[3],
+ np.float64_t v_dir[3],
+ np.float64_t enter_t,
+ np.float64_t exit_t,
+ int index[3],
+ void *data) nogil:
+ cdef ImageAccumulator *im = <ImageAccumulator *> data
+ cdef VolumeRenderAccumulator *vri = <VolumeRenderAccumulator *> \
+ im.supp_data
+ # we assume this has vertex-centered data.
+ cdef int offset = index[0] * (vc.dims[1] + 1) * (vc.dims[2] + 1) \
+ + index[1] * (vc.dims[2] + 1) + index[2]
+ cdef np.float64_t dp[3]
+ cdef np.float64_t ds[3]
+ cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
+ cdef np.float64_t dvs[6]
+ cdef np.float64_t *grad
+ grad = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+ for i in range(3):
+ dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+ dp[i] -= index[i] * vc.dds[i] + vc.left_edge[i]
+ dp[i] *= vc.idds[i]
+ ds[i] = v_dir[i] * vc.idds[i] * dt
+ for i in range(vri.n_samples):
+ for j in range(vc.n_fields):
+ dvs[j] = offset_interpolate(vc.dims, dp,
+ vc.data[j] + offset)
+ eval_gradient(vc.dims, dp, vc.data[0] + offset, grad)
+ FIT_eval_transfer_with_light(dt, dvs, grad,
+ vri.light_dir, vri.light_rgba,
+ im.rgba, vri.n_fits,
+ vri.fits, vri.field_table_ids, vri.grey_opacity)
+ for j in range(3):
+ dp[j] += ds[j]
+ free(grad)
+
+
+ def __dealloc__(self):
+ for i in range(self.vra.n_fits):
+ free(self.vra.fits[i].d0)
+ free(self.vra.fits[i].dy)
+ free(self.vra.light_dir)
+ free(self.vra.light_rgba)
+ free(self.vra.fits)
+ free(self.vra)
diff -r a4ff0334210d68c0cafc9337777e2f0c608c411b -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b yt/utilities/lib/lenses.pxd
--- /dev/null
+++ b/yt/utilities/lib/lenses.pxd
@@ -0,0 +1,60 @@
+"""
+Definitions for the lens code
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+cimport cython
+from .volume_container cimport VolumeContainer
+from vec3_ops cimport dot, subtract, L2_norm, fma
+from libc.math cimport exp, floor, log2, \
+ fabs, atan, atan2, asin, cos, sin, sqrt, acos, M_PI
+from yt.utilities.lib.fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
+
+cdef extern from "platform_dep.h":
+ long int lrint(double x) nogil
+
+cdef extern from "limits.h":
+ cdef int SHRT_MAX
+
+cdef struct ImageContainer:
+ np.float64_t[:,:,:] vp_pos
+ np.float64_t[:,:,:] vp_dir
+ np.float64_t *center
+ np.float64_t[:,:,:] image
+ np.float64_t[:,:] zbuffer
+ np.int64_t[:,:] image_used
+ np.int64_t[:,:] mesh_lines
+ np.float64_t pdx, pdy
+ np.float64_t bounds[4]
+ np.float64_t[:,:] camera_data # position, width, unit_vec[0,2]
+ int nv[2]
+ np.float64_t *x_vec
+ np.float64_t *y_vec
+
+
+ctypedef void calculate_extent_function(ImageContainer *image,
+ VolumeContainer *vc, np.int64_t rv[4]) nogil
+
+ctypedef void generate_vector_info_function(ImageContainer *im,
+ np.int64_t vi, np.int64_t vj,
+ np.float64_t width[2],
+ np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil
+
+cdef generate_vector_info_function generate_vector_info_plane_parallel
+cdef generate_vector_info_function generate_vector_info_null
+cdef calculate_extent_function calculate_extent_plane_parallel
+cdef calculate_extent_function calculate_extent_perspective
+cdef calculate_extent_function calculate_extent_null
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/yt_analysis/yt/commits/a0f0ac96e5ac/
Changeset: a0f0ac96e5ac
Branch: yt
User: ngoldbaum
Date: 2016-07-17 16:20:58+00:00
Summary: Update rendering_with_box_and_grids example
Affected #: 1 file
diff -r 9cd33e2b7f539392c680ea609bf91503f4cafc4b -r a0f0ac96e5ac42a7b7e11e243ba06d049c954f04 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -1,22 +1,23 @@
import yt
-import numpy as np
-from yt.visualization.volume_rendering.api import BoxSource, CoordinateVectorSource
# Load the dataset.
ds = yt.load("Enzo_64/DD0043/data0043")
-sc = yt.create_scene(ds, ('gas','density'))
-sc.get_source(0).transfer_function.grey_opacity=True
+sc = yt.create_scene(ds, ('gas', 'density'))
-sc.annotate_domain(ds)
+# You may need to adjust the alpha values to get a rendering with good contrast
+# For annotate_domain, the fourth color value is alpha.
+
+# Draw the domain boundary
+sc.annotate_domain(ds, color=[1, 1, 1, 0.01])
sc.render()
sc.save("%s_vr_domain.png" % ds)
-sc.annotate_grids(ds)
+# Draw the grid boundaries
+sc.annotate_grids(ds, alpha=0.01)
sc.render()
sc.save("%s_vr_grids.png" % ds)
-# Here we can draw the coordinate vectors on top of the image by processing
-# it through the camera. Then save it out.
-sc.annotate_axes()
+# Draw a coordinate axes triad
+sc.annotate_axes(alpha=0.01)
sc.render()
sc.save("%s_vr_coords.png" % ds)
https://bitbucket.org/yt_analysis/yt/commits/78124d806256/
Changeset: 78124d806256
Branch: yt
User: ngoldbaum
Date: 2016-07-18 03:54:48+00:00
Summary: correct doc typo
Affected #: 1 file
diff -r a0f0ac96e5ac42a7b7e11e243ba06d049c954f04 -r 78124d806256bb136fc8b251789c73bc35dbd662 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -318,7 +318,7 @@
Volume Rendering with Lines
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This recipe demonstrates how to make a volume rendering composited with point
+This recipe demonstrates how to make a volume rendering composited with line
sources.
.. yt_cookbook:: vol-lines.py
https://bitbucket.org/yt_analysis/yt/commits/a0b32f9139df/
Changeset: a0b32f9139df
Branch: yt
User: ngoldbaum
Date: 2016-07-18 04:13:04+00:00
Summary: Adjust recipes a bit for more pleasing visuals
Affected #: 3 files
diff -r 78124d806256bb136fc8b251789c73bc35dbd662 -r a0b32f9139df60d41f0c7453c399b5b46b93600d doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -9,15 +9,12 @@
# Draw the domain boundary
sc.annotate_domain(ds, color=[1, 1, 1, 0.01])
-sc.render()
-sc.save("%s_vr_domain.png" % ds)
+sc.save("%s_vr_domain.png" % ds, sigma_clip=4)
# Draw the grid boundaries
sc.annotate_grids(ds, alpha=0.01)
-sc.render()
-sc.save("%s_vr_grids.png" % ds)
+sc.save("%s_vr_grids.png" % ds, sigma_clip=4)
# Draw a coordinate axes triad
sc.annotate_axes(alpha=0.01)
-sc.render()
-sc.save("%s_vr_coords.png" % ds)
+sc.save("%s_vr_coords.png" % ds, sigma_clip=4)
diff -r 78124d806256bb136fc8b251789c73bc35dbd662 -r a0b32f9139df60d41f0c7453c399b5b46b93600d doc/source/cookbook/vol-lines.py
--- a/doc/source/cookbook/vol-lines.py
+++ b/doc/source/cookbook/vol-lines.py
@@ -10,7 +10,7 @@
np.random.seed(1234567)
nlines = 50
-vertices = (np.random.random([nlines, 2, 3]) - 0.5) * 250 * kpc
+vertices = (np.random.random([nlines, 2, 3]) - 0.5) * 200 * kpc
colors = np.random.random([nlines, 4])
colors[:, 3] = 0.1
@@ -19,4 +19,4 @@
sc.camera.width = 300*kpc
-sc.save()
+sc.save(sigma_clip=4.0)
diff -r 78124d806256bb136fc8b251789c73bc35dbd662 -r a0b32f9139df60d41f0c7453c399b5b46b93600d doc/source/cookbook/vol-points.py
--- a/doc/source/cookbook/vol-points.py
+++ b/doc/source/cookbook/vol-points.py
@@ -10,14 +10,20 @@
np.random.seed(1234567)
npoints = 1000
+
+# Random particle positions
vertices = np.random.random([npoints, 3])*200*kpc
+
+# Random colors
colors = np.random.random([npoints, 4])
-colors[:, 3] = 1
-colors *= 0.1
+
+# Set alpha value to something that produces a good contrast with the volume
+# rendering
+colors[:, 3] = 0.1
points = PointSource(vertices, colors=colors)
sc.add_source(points)
sc.camera.width = 300*kpc
-sc.save()
+sc.save(sigma_clip=5)
https://bitbucket.org/yt_analysis/yt/commits/1d031c0d50f1/
Changeset: 1d031c0d50f1
Branch: yt
User: ngoldbaum
Date: 2016-07-19 15:51:53+00:00
Summary: Fix buggy indexing of colors array for zlines
Affected #: 1 file
diff -r a0b32f9139df60d41f0c7453c399b5b46b93600d -r 1d031c0d50f1d12073e7ee46d380f4a62eb3f4cc yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -27,6 +27,7 @@
from libc.string cimport strcmp
from cython.view cimport memoryview
+from cython.view cimport array as cvarray
from cpython cimport buffer
@@ -327,11 +328,12 @@
cdef int nx = image.shape[0]
cdef int ny = image.shape[1]
cdef int nl = xs.shape[0]
- cdef np.float64_t alpha[4]
- cdef int i, j
+ cdef np.float64_t[:] alpha
+ cdef int i, j, c
cdef int dx, dy, sx, sy, e2, err
cdef np.int64_t x0, x1, y0, y1, yi0
cdef np.float64_t z0, z1, dzx, dzy
+ alpha = np.zeros(4)
for j in range(0, nl, 2):
# From wikipedia http://en.wikipedia.org/wiki/Bresenham's_line_algorithm
x0 = xs[j]
@@ -348,10 +350,11 @@
if crop == 1 and (dx > nx/2.0 or dy > ny/2.0):
continue
+ c = j/points_per_color/2
+
for i in range(3):
- alpha[i] = (colors[j/points_per_color, i] *
- colors[j/points_per_color, 3])
- alpha[3] = colors[j/points_per_color, 3]
+ alpha[i] = colors[c, i] * colors[c, 3]
+ alpha[3] = colors[c, 3]
if x0 < x1:
sx = 1
@@ -419,21 +422,22 @@
cdef int nx = image.shape[0]
cdef int ny = image.shape[1]
cdef int nl = xs.shape[0]
- cdef np.float64_t alpha[4]
+ cdef np.float64_t[:] alpha
cdef np.float64_t talpha
- cdef int i, j
+ cdef int i, j, c
cdef np.int64_t x0, y0, yi0
cdef np.float64_t z0
+ alpha = np.zeros(4)
for j in range(0, nl):
x0 = xs[j]
y0 = ys[j]
z0 = zs[j]
if (x0 < 0 or x0 >= nx): continue
if (y0 < 0 or y0 >= ny): continue
+ c = j/points_per_color
for i in range(3):
- alpha[i] = (colors[j/points_per_color, i] *
- colors[j/points_per_color, 3])
- alpha[3] = colors[j/points_per_color, 3]
+ alpha[i] = colors[c, i] * colors[c, 3]
+ alpha[3] = colors[c, 3]
if flip:
yi0 = ny - y0
else:
https://bitbucket.org/yt_analysis/yt/commits/01b0fc5dc11d/
Changeset: 01b0fc5dc11d
Branch: yt
User: ngoldbaum
Date: 2016-07-19 21:07:43+00:00
Summary: Bump minimum cython version to 0.24.
Affected #: 1 file
diff -r 1d031c0d50f1d12073e7ee46d380f4a62eb3f4cc -r 01b0fc5dc11de409d8b58d75b43fe3e369c7b134 setup.py
--- a/setup.py
+++ b/setup.py
@@ -378,7 +378,7 @@
package_data = {'':['*.pxd']},
setup_requires=[
'numpy',
- 'cython>=0.22',
+ 'cython>=0.24',
],
install_requires=[
'matplotlib',
https://bitbucket.org/yt_analysis/yt/commits/ea20f20551a4/
Changeset: ea20f20551a4
Branch: yt
User: ngoldbaum
Date: 2016-07-19 21:08:06+00:00
Summary: Fix fencepost error in grid_traversal.pyx
Affected #: 1 file
diff -r 01b0fc5dc11de409d8b58d75b43fe3e369c7b134 -r ea20f20551a4dba108b3bd07b31a8e919079fe9c yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -57,9 +57,9 @@
cdef np.float64_t tl, temp_x, temp_y = -1
if max_t > 1.0: max_t = 1.0
direction = -1
- if vc.left_edge[0] <= v_pos[0] and v_pos[0] <= vc.right_edge[0] and \
- vc.left_edge[1] <= v_pos[1] and v_pos[1] <= vc.right_edge[1] and \
- vc.left_edge[2] <= v_pos[2] and v_pos[2] <= vc.right_edge[2]:
+ if vc.left_edge[0] <= v_pos[0] and v_pos[0] < vc.right_edge[0] and \
+ vc.left_edge[1] <= v_pos[1] and v_pos[1] < vc.right_edge[1] and \
+ vc.left_edge[2] <= v_pos[2] and v_pos[2] < vc.right_edge[2]:
intersect_t = 0.0
direction = 3
for i in range(3):
https://bitbucket.org/yt_analysis/yt/commits/c4a48f14a854/
Changeset: c4a48f14a854
Branch: yt
User: ngoldbaum
Date: 2016-07-19 21:22:11+00:00
Summary: Bump minimum required setuptools version.
This is to ensure the fix for https://github.com/pypa/setuptools/issues/488
is available
Affected #: 1 file
diff -r ea20f20551a4dba108b3bd07b31a8e919079fe9c -r c4a48f14a8543d6bac8ab540bf5f593a6d69bca4 setup.py
--- a/setup.py
+++ b/setup.py
@@ -32,14 +32,6 @@
except pkg_resources.DistributionNotFound:
pass # yay!
-setuptools_ver = \
- LooseVersion(pkg_resources.get_distribution("setuptools").version)
-if setuptools_ver < LooseVersion("18.0"):
- print("Your setuptools version is too old to properly handle cython extensions.")
- print("Please update setuptools before proceeding:")
- print(" pip install -U setuptools")
- sys.exit(1)
-
MAPSERVER_FILES = []
MAPSERVER_DIRS = [
"",
@@ -382,7 +374,7 @@
],
install_requires=[
'matplotlib',
- 'setuptools>=18.0',
+ 'setuptools>=19.6',
'sympy',
'numpy',
'IPython',
https://bitbucket.org/yt_analysis/yt/commits/f2508397629e/
Changeset: f2508397629e
Branch: yt
User: ngoldbaum
Date: 2016-07-20 17:33:43+00:00
Summary: Merged in ngoldbaum/yt (pull request #2285)
Fix issues with opaque sources fading volume renderings. Closes #1129. Closes #1202. Closes #1194.
Affected #: 14 files
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -303,6 +303,26 @@
.. yt_cookbook:: vol-annotated.py
+.. _cookbook-vol-points:
+
+Volume Rendering with Points
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with point
+sources. This could represent star or dark matter particles, for example.
+
+.. yt_cookbook:: vol-points.py
+
+.. _cookbook-vol-lines:
+
+Volume Rendering with Lines
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with line
+sources.
+
+.. yt_cookbook:: vol-lines.py
+
.. _cookbook-opengl_vr:
Advanced Interactive Data Visualization
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -1,22 +1,20 @@
import yt
-import numpy as np
-from yt.visualization.volume_rendering.api import BoxSource, CoordinateVectorSource
# Load the dataset.
ds = yt.load("Enzo_64/DD0043/data0043")
-sc = yt.create_scene(ds, ('gas','density'))
-sc.get_source(0).transfer_function.grey_opacity=True
+sc = yt.create_scene(ds, ('gas', 'density'))
-sc.annotate_domain(ds)
-sc.render()
-sc.save("%s_vr_domain.png" % ds)
+# You may need to adjust the alpha values to get a rendering with good contrast
+# For annotate_domain, the fourth color value is alpha.
-sc.annotate_grids(ds)
-sc.render()
-sc.save("%s_vr_grids.png" % ds)
+# Draw the domain boundary
+sc.annotate_domain(ds, color=[1, 1, 1, 0.01])
+sc.save("%s_vr_domain.png" % ds, sigma_clip=4)
-# Here we can draw the coordinate vectors on top of the image by processing
-# it through the camera. Then save it out.
-sc.annotate_axes()
-sc.render()
-sc.save("%s_vr_coords.png" % ds)
+# Draw the grid boundaries
+sc.annotate_grids(ds, alpha=0.01)
+sc.save("%s_vr_grids.png" % ds, sigma_clip=4)
+
+# Draw a coordinate axes triad
+sc.annotate_axes(alpha=0.01)
+sc.save("%s_vr_coords.png" % ds, sigma_clip=4)
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f doc/source/cookbook/vol-annotated.py
--- a/doc/source/cookbook/vol-annotated.py
+++ b/doc/source/cookbook/vol-annotated.py
@@ -1,75 +1,29 @@
-#!/usr/bin/env python
+import yt
-import numpy as np
-import pylab
+ds = yt.load('Enzo_64/DD0043/data0043')
-import yt
-import yt.visualization.volume_rendering.old_camera as vr
+sc = yt.create_scene(ds, lens_type='perspective')
-ds = yt.load("maestro_subCh_plt00248")
+source = sc[0]
-dd = ds.all_data()
+source.set_field('density')
+source.set_log(True)
-# field in the dataset we will visualize
-field = ('boxlib', 'radial_velocity')
+# Set up the camera parameters: focus, width, resolution, and image orientation
+sc.camera.focus = ds.domain_center
+sc.camera.resolution = 1024
+sc.camera.north_vector = [0, 0, 1]
+sc.camera.position = [1.7, 1.7, 1.7]
-# the values we wish to highlight in the rendering. We'll put a Gaussian
-# centered on these with width sigma
-vals = [-1.e7, -5.e6, -2.5e6, 2.5e6, 5.e6, 1.e7]
-sigma = 2.e5
+# You may need to adjust the alpha values to get an image with good contrast.
+# For the annotate_domain call, the fourth value in the color tuple is the
+# alpha value.
+sc.annotate_axes(alpha=.02)
+sc.annotate_domain(ds, color=[1, 1, 1, .01])
-mi, ma = min(vals), max(vals)
+text_string = "T = {} Gyr".format(float(ds.current_time.to('Gyr')))
-# Instantiate the ColorTransferfunction.
-tf = yt.ColorTransferFunction((mi, ma))
-
-for v in vals:
- tf.sample_colormap(v, sigma**2, colormap="coolwarm")
-
-
-# volume rendering requires periodic boundaries. This dataset has
-# solid walls. We need to hack it for now (this will be fixed in
-# a later yt)
-ds.periodicity = (True, True, True)
-
-
-# Set up the camera parameters: center, looking direction, width, resolution
-c = np.array([0.0, 0.0, 0.0])
-L = np.array([1.0, 1.0, 1.2])
-W = 1.5*ds.domain_width
-N = 720
-
-# +z is "up" for our dataset
-north=[0.0,0.0,1.0]
-
-# Create a camera object
-cam = vr.Camera(c, L, W, N, transfer_function=tf, ds=ds,
- no_ghost=False, north_vector=north,
- fields = [field], log_fields = [False])
-
-im = cam.snapshot()
-
-# add an axes triad
-cam.draw_coordinate_vectors(im)
-
-# add the domain box to the image
-nim = cam.draw_domain(im)
-
-# increase the contrast -- for some reason, the enhance default
-# to save_annotated doesn't do the trick
-max_val = im[:,:,:3].std() * 4.0
-nim[:,:,:3] /= max_val
-
-# we want to write the simulation time on the figure, so create a
-# figure and annotate it
-f = pylab.figure()
-
-pylab.text(0.2, 0.85, "{:.3g} s".format(float(ds.current_time.d)),
- transform=f.transFigure, color="white")
-
-# tell the camera to use our figure
-cam._render_figure = f
-
-# save annotated -- this added the transfer function values,
-# and the clear_fig=False ensures it writes onto our existing figure.
-cam.save_annotated("vol_annotated.png", nim, dpi=145, clear_fig=False)
+# save an annotated version of the volume rendering including a representation
+# of the transfer function and a nice label showing the simulation time.
+sc.save_annotated("vol_annotated.png", sigma_clip=6,
+ text_annotate=[[(.1, 1.05), text_string]])
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f doc/source/cookbook/vol-lines.py
--- /dev/null
+++ b/doc/source/cookbook/vol-lines.py
@@ -0,0 +1,22 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import LineSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+nlines = 50
+vertices = (np.random.random([nlines, 2, 3]) - 0.5) * 200 * kpc
+colors = np.random.random([nlines, 4])
+colors[:, 3] = 0.1
+
+lines = LineSource(vertices, colors)
+sc.add_source(lines)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=4.0)
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f doc/source/cookbook/vol-points.py
--- /dev/null
+++ b/doc/source/cookbook/vol-points.py
@@ -0,0 +1,29 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import PointSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+npoints = 1000
+
+# Random particle positions
+vertices = np.random.random([npoints, 3])*200*kpc
+
+# Random colors
+colors = np.random.random([npoints, 4])
+
+# Set alpha value to something that produces a good contrast with the volume
+# rendering
+colors[:, 3] = 0.1
+
+points = PointSource(vertices, colors=colors)
+sc.add_source(points)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=5)
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -129,6 +129,9 @@
vertices. For instance, lines can be used to draw outlines of regions or
continents.
+Worked examples of using the ``LineSource`` and ``PointSource`` are available at
+:ref:`cookbook-vol-points` and :ref:`cookbook-vol-lines`.
+
.. _volume_rendering_annotations:
Annotations
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f setup.py
--- a/setup.py
+++ b/setup.py
@@ -32,14 +32,6 @@
except pkg_resources.DistributionNotFound:
pass # yay!
-setuptools_ver = \
- LooseVersion(pkg_resources.get_distribution("setuptools").version)
-if setuptools_ver < LooseVersion("18.0"):
- print("Your setuptools version is too old to properly handle cython extensions.")
- print("Please update setuptools before proceeding:")
- print(" pip install -U setuptools")
- sys.exit(1)
-
MAPSERVER_FILES = []
MAPSERVER_DIRS = [
"",
@@ -378,11 +370,11 @@
package_data = {'':['*.pxd']},
setup_requires=[
'numpy',
- 'cython>=0.22',
+ 'cython>=0.24',
],
install_requires=[
'matplotlib',
- 'setuptools>=18.0',
+ 'setuptools>=19.6',
'sympy',
'numpy',
'IPython',
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -50,7 +50,7 @@
local_tipsy_001:
- yt/frontends/tipsy/tests/test_outputs.py
- local_varia_001:
+ local_varia_002:
- yt/analysis_modules/radmc3d_export
- yt/frontends/moab/tests/test_c5.py
- yt/analysis_modules/photon_simulator/tests/test_spectra.py
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -57,9 +57,9 @@
cdef np.float64_t tl, temp_x, temp_y = -1
if max_t > 1.0: max_t = 1.0
direction = -1
- if vc.left_edge[0] <= v_pos[0] and v_pos[0] <= vc.right_edge[0] and \
- vc.left_edge[1] <= v_pos[1] and v_pos[1] <= vc.right_edge[1] and \
- vc.left_edge[2] <= v_pos[2] and v_pos[2] <= vc.right_edge[2]:
+ if vc.left_edge[0] <= v_pos[0] and v_pos[0] < vc.right_edge[0] and \
+ vc.left_edge[1] <= v_pos[1] and v_pos[1] < vc.right_edge[1] and \
+ vc.left_edge[2] <= v_pos[2] and v_pos[2] < vc.right_edge[2]:
intersect_t = 0.0
direction = 3
for i in range(3):
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -27,6 +27,7 @@
from libc.string cimport strcmp
from cython.view cimport memoryview
+from cython.view cimport array as cvarray
from cpython cimport buffer
@@ -327,11 +328,12 @@
cdef int nx = image.shape[0]
cdef int ny = image.shape[1]
cdef int nl = xs.shape[0]
- cdef np.float64_t alpha[4]
- cdef int i, j
+ cdef np.float64_t[:] alpha
+ cdef int i, j, c
cdef int dx, dy, sx, sy, e2, err
cdef np.int64_t x0, x1, y0, y1, yi0
cdef np.float64_t z0, z1, dzx, dzy
+ alpha = np.zeros(4)
for j in range(0, nl, 2):
# From wikipedia http://en.wikipedia.org/wiki/Bresenham's_line_algorithm
x0 = xs[j]
@@ -348,8 +350,11 @@
if crop == 1 and (dx > nx/2.0 or dy > ny/2.0):
continue
- for i in range(4):
- alpha[i] = colors[j/points_per_color, i]
+ c = j/points_per_color/2
+
+ for i in range(3):
+ alpha[i] = colors[c, i] * colors[c, 3]
+ alpha[3] = colors[c, 3]
if x0 < x1:
sx = 1
@@ -417,19 +422,22 @@
cdef int nx = image.shape[0]
cdef int ny = image.shape[1]
cdef int nl = xs.shape[0]
- cdef np.float64_t alpha[4]
+ cdef np.float64_t[:] alpha
cdef np.float64_t talpha
- cdef int i, j
+ cdef int i, j, c
cdef np.int64_t x0, y0, yi0
cdef np.float64_t z0
+ alpha = np.zeros(4)
for j in range(0, nl):
x0 = xs[j]
y0 = ys[j]
z0 = zs[j]
if (x0 < 0 or x0 >= nx): continue
if (y0 < 0 or y0 >= ny): continue
- for i in range(4):
- alpha[i] = colors[j/points_per_color, i]
+ c = j/points_per_color
+ for i in range(3):
+ alpha[i] = colors[c, i] * colors[c, 3]
+ alpha[3] = colors[c, 3]
if flip:
yi0 = ny - y0
else:
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -471,10 +471,10 @@
image.shape = camera.resolution[0], camera.resolution[1], 4
# If the call is from VR, the image is rotated by 180 to get correct
# up direction
- if call_from_VR is True:
+ if call_from_VR is True:
image = np.rot90(image, k=2)
if self.transfer_function.grey_opacity is False:
- image[:, :, 3] = 1.0
+ image[:, :, 3] = 1
return image
def __repr__(self):
@@ -811,8 +811,8 @@
Parameters
----------
positions: array, shape (N, 3)
- These positions, in data-space coordinates, are the points to be
- added to the scene.
+ The positions of points to be added to the scene. If specified with no
+ units, the positions will be assumed to be in code units.
colors : array, shape (N, 4), optional
The colors of the points, including an alpha channel, in floating
point running from 0..1.
@@ -829,18 +829,19 @@
>>> import yt
>>> import numpy as np
>>> from yt.visualization.volume_rendering.api import PointSource
+ >>> from yt.units import kpc
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-
+
>>> im, sc = yt.volume_render(ds)
-
+
>>> npoints = 1000
- >>> vertices = np.random.random([npoints, 3])
+ >>> vertices = np.random.random([npoints, 3]) * 1000 * kpc
>>> colors = np.random.random([npoints, 4])
>>> colors[:,3] = 1.0
>>> points = PointSource(vertices, colors=colors)
>>> sc.add_source(points)
-
+
>>> im = sc.render()
"""
@@ -858,7 +859,6 @@
# If colors aren't individually set, make black with full opacity
if colors is None:
colors = np.ones((len(positions), 4))
- colors[:, 3] = 1.
self.colors = colors
self.color_stride = color_stride
@@ -912,19 +912,25 @@
This class provides a mechanism for adding lines to a scene; these
points will be opaque, and can also be colored.
+ .. note::
+
+ If adding a LineSource to your rendering causes the image to appear
+ blank or fades a VolumeSource, try lowering the values specified in
+ the alpha channel of the ``colors`` array.
+
Parameters
----------
positions: array, shape (N, 2, 3)
- These positions, in data-space coordinates, are the starting and
- stopping points for each pair of lines. For example,
- positions[0][0] and positions[0][1] would give the (x, y, z)
+ The positions of the starting and stopping points for each line.
+ For example,positions[0][0] and positions[0][1] would give the (x, y, z)
coordinates of the beginning and end points of the first line,
- respectively.
+ respectively. If specified with no units, assumed to be in code units.
colors : array, shape (N, 4), optional
The colors of the points, including an alpha channel, in floating
- point running from 0..1. Note that they correspond to the line
- segment succeeding each point; this means that strictly speaking
- they need only be (N-1) in length.
+ point running from 0..1. The four channels correspond to r, g, b, and
+ alpha values. Note that they correspond to the line segment succeeding
+ each point; this means that strictly speaking they need only be (N-1)
+ in length.
color_stride : int, optional
The stride with which to access the colors when putting them on the
scene.
@@ -938,20 +944,21 @@
>>> import yt
>>> import numpy as np
>>> from yt.visualization.volume_rendering.api import LineSource
+ >>> from yt.units import kpc
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-
+
>>> im, sc = yt.volume_render(ds)
-
- >>> npoints = 100
- >>> vertices = np.random.random([npoints, 2, 3])
- >>> colors = np.random.random([npoints, 4])
+
+ >>> nlines = 4
+ >>> vertices = np.random.random([nlines, 2, 3]) * 600 * kpc
+ >>> colors = np.random.random([nlines, 4])
>>> colors[:,3] = 1.0
-
+
>>> lines = LineSource(vertices, colors)
>>> sc.add_source(lines)
>>> im = sc.render()
-
+
"""
_image = None
@@ -974,7 +981,6 @@
# If colors aren't individually set, make black with full opacity
if colors is None:
colors = np.ones((len(positions), 4))
- colors[:, 3] = 1.
self.colors = colors
self.color_stride = color_stride
@@ -1016,14 +1022,15 @@
py = py.astype('int64')
if len(px.shape) == 1:
- zlines(empty, z, px, py, dz, self.colors, self.color_stride)
+ zlines(empty, z, px, py, dz, self.colors.astype('float64'),
+ self.color_stride)
else:
# For stereo-lens, two sets of pos for each eye are contained
# in px...pz
- zlines(empty, z, px[0,:], py[0,:], dz[0,:], self.colors,
- self.color_stride)
- zlines(empty, z, px[1,:], py[1,:], dz[1,:], self.colors,
- self.color_stride)
+ zlines(empty, z, px[0, :], py[0, :], dz[0, :],
+ self.colors.astype('float64'), self.color_stride)
+ zlines(empty, z, px[1, :], py[1, :], dz[1, :],
+ self.colors.astype('float64'), self.color_stride)
self.zbuffer = zbuffer
return zbuffer
@@ -1180,7 +1187,7 @@
colors = apply_colormap(
levels*1.0,
color_bounds=[0, self.data_source.ds.index.max_level],
- cmap_name=cmap)[0, :, :]*alpha/255.
+ cmap_name=cmap)[0, :, :]/255.
colors[:, 3] = alpha
order = [0, 1, 1, 2, 2, 3, 3, 0]
@@ -1230,9 +1237,9 @@
# If colors aren't individually set, make black with full opacity
if colors is None:
colors = np.zeros((3, 4))
- colors[0, 0] = alpha # x is red
- colors[1, 1] = alpha # y is green
- colors[2, 2] = alpha # z is blue
+ colors[0, 0] = 1.0 # x is red
+ colors[1, 1] = 1.0 # y is green
+ colors[2, 2] = 1.0 # z is blue
colors[:, 3] = alpha
self.colors = colors
self.color_stride = 2
@@ -1316,14 +1323,15 @@
py = py.astype('int64')
if len(px.shape) == 1:
- zlines(empty, z, px, py, dz, self.colors, self.color_stride)
+ zlines(empty, z, px, py, dz, self.colors.astype('float64'),
+ self.color_stride)
else:
# For stereo-lens, two sets of pos for each eye are contained
# in px...pz
- zlines(empty, z, px[0,:], py[0,:], dz[0,:], self.colors,
- self.color_stride)
- zlines(empty, z, px[1,:], py[1,:], dz[1,:], self.colors,
- self.color_stride)
+ zlines(empty, z, px[0, :], py[0, :], dz[0, :],
+ self.colors.astype('float64'), self.color_stride)
+ zlines(empty, z, px[1, :], py[1, :], dz[1, :],
+ self.colors.astype('float64'), self.color_stride)
# Set the new zbuffer
self.zbuffer = zbuffer
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -34,7 +34,9 @@
GridSource, \
RenderSource, \
MeshSource, \
- VolumeSource
+ VolumeSource, \
+ PointSource, \
+ LineSource
from .zbuffer_array import ZBuffer
from yt.extern.six.moves import builtins
from yt.utilities.exceptions import YTNotInsideNotebook
@@ -158,6 +160,11 @@
"Line annotation sources are not supported for %s."
% (type(self.camera.lens).__name__), )
+ if isinstance(render_source, (LineSource, PointSource)):
+ if isinstance(render_source.positions, YTArray):
+ render_source.positions = \
+ self.arr(render_source.positions).in_units('code_length').d
+
self.sources[keyname] = render_source
return self
@@ -330,10 +337,11 @@
A format specifier (e.g., label_fmt="%.2g") to use in formatting
the data values that label the transfer function colorbar.
text_annotate : list of iterables
- Any text that you wish to display on the image. This should be an
- list of a tuple of coordinates (in normalized figure coordinates),
- the text to display, and, optionally, a dictionary of keyword/value
- pairs to pass through to the matplotlib text() function.
+ Any text that you wish to display on the image. This should be an
+ list containing a tuple of coordinates (in normalized figure
+ coordinates), the text to display, and, optionally, a dictionary of
+ keyword/value pairs to pass through to the matplotlib text()
+ function.
Each item in the main list is a separate string to write.
@@ -385,12 +393,12 @@
rs = rensources[0]
tf = rs.transfer_function
label = rs.data_source.ds._get_field_info(rs.field).get_label()
- if rs.data_source.ds._get_field_info(rs.field).take_log:
+ if rs.log_field:
label = r'$\rm{log}\ $' + label
ax = self._show_mpl(self._last_render.swapaxes(0, 1),
sigma_clip=sigma_clip, dpi=dpi)
- self._annotate(ax.axes, tf, label=label, label_fmt=label_fmt)
+ self._annotate(ax.axes, tf, rs, label=label, label_fmt=label_fmt)
plt.tight_layout()
# any text?
@@ -434,7 +442,7 @@
return axim
- def _annotate(self, ax, tf, label="", label_fmt=None):
+ def _annotate(self, ax, tf, source, label="", label_fmt=None):
import matplotlib.pyplot as plt
ax.get_xaxis().set_visible(False)
ax.get_xaxis().set_ticks([])
@@ -442,7 +450,9 @@
ax.get_yaxis().set_ticks([])
cb = plt.colorbar(ax.images[0], pad=0.0, fraction=0.05,
drawedges=True, shrink=0.75)
- tf.vert_cbar(ax=cb.ax, label=label, label_fmt=label_fmt)
+ tf.vert_cbar(ax=cb.ax, label=label, label_fmt=label_fmt,
+ resolution=self.camera.resolution[0],
+ log_scale=source.log_field)
def _validate(self):
r"""Validate the current state of the scene."""
@@ -649,7 +659,7 @@
"""
box_source = BoxSource(ds.domain_left_edge,
ds.domain_right_edge,
- color=None)
+ color=color)
self.add_source(box_source)
return self
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -47,7 +47,7 @@
self.log = False
self.tf = None
self.bounds = None
- self.grey_opacity = True
+ self.grey_opacity = False
self.profiles = {}
def set_bounds(self, bounds=None):
diff -r b3f018f29a22e04e36c41ba6537accd8c167f11c -r f2508397629ee2e657d3e6f78415b8209cb5f95f yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -554,7 +554,8 @@
ax.set_ylabel("Opacity")
ax.set_xlabel("Value")
- def vert_cbar(self, ax=None, label=None, label_fmt=None):
+ def vert_cbar(self, resolution, log_scale, ax=None, label=None,
+ label_fmt=None):
r"""Display an image of the transfer function
This function loads up matplotlib and displays the current transfer function.
@@ -603,6 +604,8 @@
ax.yaxis.set_ticks(xticks)
def x_format(x, pos):
val = x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size-1) + self.alpha.x[0]
+ if log_scale is True:
+ val = 10**val
if label_fmt is None:
if abs(val) < 1.e-3 or abs(val) > 1.e4:
if not val == 0.0:
@@ -626,7 +629,7 @@
ax.get_xaxis().set_ticks([])
ax.set_ylim(visible[0], visible[-1])
ax.tick_params(axis='y', colors='white', size=10)
- ax.set_ylabel(label, color='white')
+ ax.set_ylabel(label, color='white', size=10*resolution/512.0)
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list