[yt-svn] commit/yt: 4 new changesets
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Thu Oct 15 23:08:50 PDT 2015
4 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/8f7e36cbf691/
Changeset: 8f7e36cbf691
Branch: yt
User: ngoldbaum
Date: 2015-10-15 22:17:34+00:00
Summary: Add and expand docstrings for all classes and member functions that were missing
Affected #: 6 files
diff -r 562b253b734a3abc182b712fb9009034ea3b36c6 -r 8f7e36cbf691d0f592c0d7c259f5eb1345d9adea yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -29,16 +29,15 @@
latex_symbol_lut, unit_prefixes, \
prefixable_units, cgs_base_units, \
mks_base_units, latex_prefixes, yt_base_units
-from yt.units.unit_registry import UnitRegistry
+from yt.units.unit_registry import \
+ UnitRegistry, \
+ UnitParseError
from yt.utilities.exceptions import YTUnitsNotReducible
import copy
import string
import token
-class UnitParseError(Exception):
- pass
-
class InvalidUnitOperation(Exception):
pass
diff -r 562b253b734a3abc182b712fb9009034ea3b36c6 -r 8f7e36cbf691d0f592c0d7c259f5eb1345d9adea yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -14,6 +14,7 @@
from yt.funcs import iterable, mylog, ensure_numpy_array
from yt.utilities.orientation import Orientation
from yt.units.yt_array import YTArray
+from yt.units.unit_registry import UnitParseError
from yt.utilities.math_utils import get_rotation_matrix
from .utils import data_source_or_all
from .lens import lenses
@@ -22,16 +23,34 @@
class Camera(Orientation):
- r"""
+ r"""A representation of a point of view into a Scene.
- The Camera class. A Camera represents of point of view into a
- Scene. It is defined by a position (the location of the camera
+ It is defined by a position (the location of the camera
     in the simulation domain), a focus (the point at which the
     camera is pointed), a width (the width of the snapshot that will
     be taken), a resolution (the number of pixels in the image), and
a north_vector (the "up" direction in the resulting image). A
camera can use a variety of different Lens objects.
+ Parameters
+ ----------
+ data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+ This is the source to be rendered, which can be any arbitrary yt
+ data object or dataset.
+ lens_type: string, optional
+ This specifies the type of lens to use for rendering. Current
+ options are 'plane-parallel', 'perspective', and 'fisheye'. See
+ :class:`yt.visualization.volume_rendering.lens.Lens` for details.
+ Default: 'plane-parallel'
+ auto: boolean
+        If True, build smart defaults using the data source extent. This
+        can be time-consuming since it iterates over the entire dataset
+        to find the positional bounds. Default: False
+
+ Examples
+ --------
+ >>> cam = Camera(ds)
+
"""
_moved = True
@@ -42,29 +61,7 @@
def __init__(self, data_source=None, lens_type='plane-parallel',
auto=False):
- """
- Initialize a Camera Instance
-
- Parameters
- ----------
- data_source: :class:`AMR3DData` or :class:`Dataset`, optional
- This is the source to be rendered, which can be any arbitrary yt
- data object or dataset.
- lens_type: string, optional
- This specifies the type of lens to use for rendering. Current
- options are 'plane-parallel', 'perspective', and 'fisheye'. See
- :class:`yt.visualization.volume_rendering.lens.Lens` for details.
- Default: 'plane-parallel'
- auto: boolean
- If True, build smart defaults using the data source extent. This
- can be time-consuming to iterate over the entire dataset to find
- the positional bounds. Default: False
-
- Examples
- --------
- >>> cam = Camera(ds)
-
- """
+ """Initialize a Camera Instance"""
self.lens = None
self.north_vector = None
self.normal_vector = None
@@ -161,9 +158,7 @@
return lens_params
def set_lens(self, lens_type):
- r'''
-
- Set the lens to be used with this camera.
+ r"""Set the lens to be used with this camera.
Parameters
----------
@@ -177,7 +172,7 @@
'spherical'
'stereo-spherical'
- '''
+ """
if lens_type not in lenses:
mylog.error("Lens type not available")
raise RuntimeError()
@@ -185,6 +180,7 @@
self.lens.camera = self
def set_defaults_from_data_source(self, data_source):
+ """Resets the camera attributes to their default values"""
self.position = data_source.pf.domain_right_edge
width = 1.5 * data_source.pf.domain_width.max()
@@ -215,20 +211,22 @@
self._moved = True
def set_width(self, width):
- r"""
-
- Set the width of the image that will be produced by this camera.
- This must be a YTQuantity.
+ r"""Set the width of the image that will be produced by this camera.
Parameters
----------
- width : :class:`yt.units.yt_array.YTQuantity`
-
+ width : YTQuantity or 3 element YTArray
+ The width of the volume rendering in the horizontal, vertical, and
+ depth directions. If a scalar, assumes that the width is the same in
+ all three directions.
"""
- assert isinstance(width, YTArray), 'Width must be created with ds.arr'
- if isinstance(width, YTArray):
+ try:
width = width.in_units('code_length')
+ except (AttributeError, UnitParseError):
+ raise ValueError(
+ 'Volume rendering width must be a YTArray that can be '
+ 'converted to code units')
if not iterable(width):
width = YTArray([width.d]*3, width.units) # Can't get code units.
@@ -236,9 +234,7 @@
self.switch_orientation()
def set_position(self, position, north_vector=None):
- r"""
-
- Set the position of the camera.
+ r"""Set the position of the camera.
Parameters
----------
@@ -256,8 +252,7 @@
north_vector=north_vector)
def switch_orientation(self, normal_vector=None, north_vector=None):
- r"""
- Change the view direction based on any of the orientation parameters.
+ r"""Change the view direction based on any of the orientation parameters.
This will recalculate all the necessary vectors and vector planes
related to an orientable object.
@@ -468,11 +463,6 @@
factor : float
The factor by which to reduce the distance to the focal point.
-
- Notes
- -----
-
- You will need to call snapshot() again to get a new image.
"""
self.set_width(self.width / factor)
diff -r 562b253b734a3abc182b712fb9009034ea3b36c6 -r 8f7e36cbf691d0f592c0d7c259f5eb1345d9adea yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -26,15 +26,7 @@
class Lens(ParallelAnalysisInterface):
-
- """
-
- A base class for setting up Lens objects. A Lens,
- along with a Camera, is used to defined the set of
- rays that will be used for rendering.
-
- """
-
+ """A Lens is used to define the set of rays for rendering."""
def __init__(self, ):
super(Lens, self).__init__()
self.viewpoint = None
@@ -48,9 +40,14 @@
self.sampler = None
def set_camera(self, camera):
+ """Set the properties of the lens based on the camera.
+
+ This is a proxy for setup_box_properties
+ """
self.setup_box_properties(camera)
def new_image(self, camera):
+ """Initialize a new ImageArray to be used with this lens."""
self.current_image = ImageArray(
np.zeros((camera.resolution[0], camera.resolution[1],
4), dtype='float64', order='C'),
@@ -58,6 +55,7 @@
return self.current_image
def setup_box_properties(self, camera):
+ """Set up the view and stage based on the properties of the camera."""
unit_vectors = camera.unit_vectors
width = camera.width
center = camera.focus
@@ -80,13 +78,12 @@
class PlaneParallelLens(Lens):
+ r"""The lens for orthographic projections.
- r'''
-
- This lens type is the standard type used for orthographic projections.
All rays emerge parallel to each other, arranged along a plane.
- '''
+ The initializer takes no parameters.
+ """
def __init__(self, ):
super(PlaneParallelLens, self).__init__()
@@ -111,6 +108,7 @@
return sampler_params
def set_viewpoint(self, camera):
+ """Set the viewpoint based on the camera"""
# This is a hack that should be replaced by an alternate plane-parallel
# traversal. Put the camera really far away so that the effective
# viewpoint is infinitely far away, making for parallel rays.
@@ -135,13 +133,11 @@
class PerspectiveLens(Lens):
+ r"""A lens for viewing a scene with a set of rays within an opening angle.
- r'''
-
- This lens type adjusts for an opening view angle, so that the scene will
- have an element of perspective to it.
-
- '''
+ The scene will have an element of perspective to it since the rays are not
+ parallel.
+ """
def __init__(self):
super(PerspectiveLens, self).__init__()
@@ -174,24 +170,30 @@
px = np.mat(np.linspace(-.5, .5, camera.resolution[0]))
py = np.mat(np.linspace(-.5, .5, camera.resolution[1]))
- sample_x = camera.width[0] * np.array(east_vec.reshape(3,1) * px).transpose()
- sample_y = camera.width[1] * np.array(north_vec.reshape(3,1) * py).transpose()
+ sample_x = camera.width[0] * np.array(east_vec.reshape(3, 1) * px)
+ sample_x = sample_x.transpose()
+ sample_y = camera.width[1] * np.array(north_vec.reshape(3, 1) * py)
+ sample_y = sample_y.transpose()
vectors = np.zeros((camera.resolution[0], camera.resolution[1], 3),
dtype='float64', order='C')
- sample_x = np.repeat(sample_x.reshape(camera.resolution[0],1,3), \
+ sample_x = np.repeat(sample_x.reshape(camera.resolution[0], 1, 3),
camera.resolution[1], axis=1)
- sample_y = np.repeat(sample_y.reshape(1,camera.resolution[1],3), \
+ sample_y = np.repeat(sample_y.reshape(1, camera.resolution[1], 3),
camera.resolution[0], axis=0)
- normal_vecs = np.tile(normal_vec, camera.resolution[0] * camera.resolution[1])\
- .reshape(camera.resolution[0], camera.resolution[1], 3)
+ normal_vecs = np.tile(
+ normal_vec, camera.resolution[0] * camera.resolution[1])
+ normal_vecs = normal_vecs.reshape(
+ camera.resolution[0], camera.resolution[1], 3)
vectors = sample_x + sample_y + normal_vecs * camera.width[2]
- positions = np.tile(camera.position, camera.resolution[0] * camera.resolution[1])\
- .reshape(camera.resolution[0], camera.resolution[1], 3)
+ positions = np.tile(
+ camera.position, camera.resolution[0] * camera.resolution[1])
+ positions = positions.reshape(
+ camera.resolution[0], camera.resolution[1], 3)
uv = np.ones(3, dtype='float64')
@@ -234,11 +236,12 @@
if np.arccos(sight_angle_cos) < 0.5 * np.pi:
sight_length = camera.width[2] / sight_angle_cos
else:
- # If the corner is on the backwards, then we put it outside of the image
- # It can not be simply removed because it may connect to other corner
- # within the image, which produces visible domain boundary line
- sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2) / \
- np.sqrt(1 - sight_angle_cos**2)
+            # If the corner is on the backward side, then we put it outside
+            # of the image. It cannot simply be removed because it may
+            # connect to another corner within the image, which would
+            # produce a visible domain boundary line.
+ sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2)
+ sight_length = sight_length / np.sqrt(1 - sight_angle_cos**2)
pos1[i] = camera.position + sight_length * sight_vector[i]
dx = np.dot(pos1 - sight_center.d, camera.unit_vectors[0])
@@ -256,8 +259,7 @@
class StereoPerspectiveLens(Lens):
-
- """docstring for StereoPerspectiveLens"""
+ """A lens that includes two sources for perspective rays, for 3D viewing"""
def __init__(self):
super(StereoPerspectiveLens, self).__init__()
@@ -265,6 +267,7 @@
self.disparity = None
def new_image(self, camera):
+ """Initialize a new ImageArray to be used with this lens."""
self.current_image = ImageArray(
np.zeros((camera.resolution[0]*camera.resolution[1], 1,
4), dtype='float64', order='C'),
@@ -275,10 +278,6 @@
# We should move away from pre-generation of vectors like this and into
# the usage of on-the-fly generation in the VolumeIntegrator module
# We might have a different width and back_center
- # dl = (self.back_center - self.front_center)
- # self.front_center += self.expand_factor*dl
- # self.back_center -= dl
-
if self.disparity is None:
self.disparity = camera.width[0] / 2.e3
@@ -287,8 +286,10 @@
else:
image = self.new_image(camera)
- vectors_left, positions_left = self._get_positions_vectors(camera, -self.disparity)
- vectors_right, positions_right = self._get_positions_vectors(camera, self.disparity)
+ vectors_left, positions_left = self._get_positions_vectors(
+ camera, -self.disparity)
+ vectors_right, positions_right = self._get_positions_vectors(
+ camera, self.disparity)
uv = np.ones(3, dtype='float64')
@@ -330,28 +331,37 @@
px = np.mat(np.linspace(-.5, .5, single_resolution_x))
py = np.mat(np.linspace(-.5, .5, camera.resolution[1]))
- sample_x = camera.width[0] * np.array(east_vec_rot.reshape(3,1) * px).transpose()
- sample_y = camera.width[1] * np.array(north_vec.reshape(3,1) * py).transpose()
+ sample_x = camera.width[0] * np.array(east_vec_rot.reshape(3, 1) * px)
+ sample_x = sample_x.transpose()
+ sample_y = camera.width[1] * np.array(north_vec.reshape(3, 1) * py)
+ sample_y = sample_y.transpose()
vectors = np.zeros((single_resolution_x, camera.resolution[1], 3),
dtype='float64', order='C')
- sample_x = np.repeat(sample_x.reshape(single_resolution_x,1,3), \
+ sample_x = np.repeat(sample_x.reshape(single_resolution_x, 1, 3),
camera.resolution[1], axis=1)
- sample_y = np.repeat(sample_y.reshape(1,camera.resolution[1],3), \
+ sample_y = np.repeat(sample_y.reshape(1, camera.resolution[1], 3),
single_resolution_x, axis=0)
- normal_vecs = np.tile(normal_vec_rot, single_resolution_x * camera.resolution[1])\
- .reshape(single_resolution_x, camera.resolution[1], 3)
- east_vecs = np.tile(east_vec_rot, single_resolution_x * camera.resolution[1])\
- .reshape(single_resolution_x, camera.resolution[1], 3)
+ normal_vecs = np.tile(
+ normal_vec_rot, single_resolution_x * camera.resolution[1])
+ normal_vecs = normal_vecs.reshape(
+ single_resolution_x, camera.resolution[1], 3)
+ east_vecs = np.tile(
+ east_vec_rot, single_resolution_x * camera.resolution[1])
+ east_vecs = east_vecs.reshape(
+ single_resolution_x, camera.resolution[1], 3)
vectors = sample_x + sample_y + normal_vecs * camera.width[2]
- positions = np.tile(camera.position, single_resolution_x * camera.resolution[1])\
- .reshape(single_resolution_x, camera.resolution[1], 3)
+ positions = np.tile(
+ camera.position, single_resolution_x * camera.resolution[1])
+ positions = positions.reshape(
+ single_resolution_x, camera.resolution[1], 3)
- positions = positions + east_vecs * disparity # Here the east_vecs is non-rotated one
+ # Here the east_vecs is non-rotated one
+ positions = positions + east_vecs * disparity
mylog.debug(positions)
mylog.debug(vectors)
@@ -365,8 +375,10 @@
if self.disparity is None:
self.disparity = camera.width[0] / 2.e3
- px_left, py_left, dz_left = self._get_px_py_dz(camera, pos, res, -self.disparity)
- px_right, py_right, dz_right = self._get_px_py_dz(camera, pos, res, self.disparity)
+ px_left, py_left, dz_left = self._get_px_py_dz(
+ camera, pos, res, -self.disparity)
+ px_right, py_right, dz_right = self._get_px_py_dz(
+ camera, pos, res, self.disparity)
px = np.hstack([px_left, px_right])
py = np.hstack([py_left, py_right])
@@ -402,16 +414,18 @@
if np.arccos(sight_angle_cos) < 0.5 * np.pi:
sight_length = camera.width[2] / sight_angle_cos
else:
- # If the corner is on the backwards, then we put it outside of the image
- # It can not be simply removed because it may connect to other corner
- # within the image, which produces visible domain boundary line
- sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2) / \
- np.sqrt(1 - sight_angle_cos**2)
+            # If the corner is on the backward side, then we put it outside
+            # of the image. It cannot simply be removed because it may
+            # connect to another corner within the image, which would
+            # produce a visible domain boundary line.
+ sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2)
+ sight_length = sight_length / np.sqrt(1 - sight_angle_cos**2)
pos1[i] = camera_position_shift + sight_length * sight_vector[i]
dx = np.dot(pos1 - sight_center.d, east_vec_rot)
dy = np.dot(pos1 - sight_center.d, north_vec)
dz = np.dot(pos1 - sight_center.d, normal_vec_rot)
+
# Transpose into image coords.
px = (res0_h * 0.5 + res0_h / camera.width[0].d * dx).astype('int')
py = (res[1] * 0.5 + res[1] / camera.width[1].d * dy).astype('int')
@@ -431,14 +445,13 @@
class FisheyeLens(Lens):
+ r"""A lens for dome-based renderings
- r"""
-
- This lens type accepts a field-of-view property, fov, that describes how wide
- an angle the fisheye can see. Fisheye images are typically used for dome-based
- presentations; the Hayden planetarium for instance has a field of view of 194.6.
- The images returned by this camera will be flat pixel images that can and should
- be reshaped to the resolution.
+ This lens type accepts a field-of-view property, fov, that describes how
+ wide an angle the fisheye can see. Fisheye images are typically used for
+ dome-based presentations; the Hayden planetarium for instance has a field of
+ view of 194.6. The images returned by this camera will be flat pixel images
+ that can and should be reshaped to the resolution.
"""
@@ -450,11 +463,13 @@
self.rotation_matrix = np.eye(3)
def setup_box_properties(self, camera):
+ """Set up the view and stage based on the properties of the camera."""
self.radius = camera.width.max()
super(FisheyeLens, self).setup_box_properties(camera)
self.set_viewpoint(camera)
def new_image(self, camera):
+ """Initialize a new ImageArray to be used with this lens."""
self.current_image = ImageArray(
np.zeros((camera.resolution[0]**2, 1,
4), dtype='float64', order='C'),
@@ -489,9 +504,7 @@
return sampler_params
def set_viewpoint(self, camera):
- """
- For a FisheyeLens, the viewpoint is the front center.
- """
+ """For a FisheyeLens, the viewpoint is the camera's position"""
self.viewpoint = camera.position
def __repr__(self):
@@ -530,12 +543,11 @@
class SphericalLens(Lens):
+ r"""A lens for cylindrical-spherical projection.
- r"""
+ Movies rendered in this way can be displayed in head-tracking devices or
+ in YouTube 360 view.
- This is a cylindrical-spherical projection. Movies rendered in this way
- can be displayed in head-tracking devices or in YouTube 360 view.
-
"""
def __init__(self):
@@ -545,6 +557,7 @@
self.rotation_matrix = np.eye(3)
def setup_box_properties(self, camera):
+ """Set up the view and stage based on the properties of the camera."""
self.radius = camera.width.max()
super(SphericalLens, self).setup_box_properties(camera)
self.set_viewpoint(camera)
@@ -562,11 +575,13 @@
vectors[:, :, 2] = np.sin(py)
vectors = vectors * camera.width[0]
- positions = np.tile(camera.position, camera.resolution[0] * camera.resolution[1])\
- .reshape(camera.resolution[0], camera.resolution[1], 3)
+ positions = np.tile(
+ camera.position,
+ camera.resolution[0] * camera.resolution[1]).reshape(
+ camera.resolution[0], camera.resolution[1], 3)
- R1 = get_rotation_matrix(0.5*np.pi, [1,0,0])
- R2 = get_rotation_matrix(0.5*np.pi, [0,0,1])
+ R1 = get_rotation_matrix(0.5*np.pi, [1, 0, 0])
+ R2 = get_rotation_matrix(0.5*np.pi, [0, 0, 1])
uv = np.dot(R1, camera.unit_vectors)
uv = np.dot(R2, uv)
vectors.reshape((camera.resolution[0]*camera.resolution[1], 3))
@@ -595,9 +610,7 @@
return sampler_params
def set_viewpoint(self, camera):
- """
- For a PerspectiveLens, the viewpoint is the front center.
- """
+ """For a SphericalLens, the viewpoint is the camera's position"""
self.viewpoint = camera.position
def project_to_plane(self, camera, pos, res=None):
@@ -631,8 +644,11 @@
class StereoSphericalLens(Lens):
+ r"""A lens for a stereo cylindrical-spherical projection.
- """docstring for StereoSphericalLens"""
+    Movies rendered in this way can be displayed in VR devices or as stereo
+    YouTube 360-degree movies.
+ """
def __init__(self):
super(StereoSphericalLens, self).__init__()
@@ -651,31 +667,35 @@
self.disparity = camera.width[0] / 1000.
single_resolution_x = np.floor(camera.resolution[0])/2
- px = np.linspace(-np.pi, np.pi, single_resolution_x, endpoint=True)[:,None]
- py = np.linspace(-np.pi/2., np.pi/2., camera.resolution[1], endpoint=True)[None,:]
+ px = np.linspace(-np.pi, np.pi, single_resolution_x,
+ endpoint=True)[:, None]
+ py = np.linspace(-np.pi/2., np.pi/2., camera.resolution[1],
+ endpoint=True)[None, :]
vectors = np.zeros((single_resolution_x, camera.resolution[1], 3),
dtype='float64', order='C')
- vectors[:,:,0] = np.cos(px) * np.cos(py)
- vectors[:,:,1] = np.sin(px) * np.cos(py)
- vectors[:,:,2] = np.sin(py)
+ vectors[:, :, 0] = np.cos(px) * np.cos(py)
+ vectors[:, :, 1] = np.sin(px) * np.cos(py)
+ vectors[:, :, 2] = np.sin(py)
vectors = vectors * camera.width[0]
vectors2 = np.zeros((single_resolution_x, camera.resolution[1], 3),
dtype='float64', order='C')
- vectors2[:,:,0] = -np.sin(px) * np.ones((1, camera.resolution[1]))
- vectors2[:,:,1] = np.cos(px) * np.ones((1, camera.resolution[1]))
- vectors2[:,:,2] = 0
+ vectors2[:, :, 0] = -np.sin(px) * np.ones((1, camera.resolution[1]))
+ vectors2[:, :, 1] = np.cos(px) * np.ones((1, camera.resolution[1]))
+ vectors2[:, :, 2] = 0
- positions = np.tile(camera.position, single_resolution_x * camera.resolution[1])\
- .reshape(single_resolution_x, camera.resolution[1], 3)
+ positions = np.tile(
+ camera.position, single_resolution_x * camera.resolution[1])
+ positions = positions.reshape(
+ single_resolution_x, camera.resolution[1], 3)
# The left and right are switched here since VR is in LHS.
positions_left = positions + vectors2 * self.disparity
positions_right = positions + vectors2 * (-self.disparity)
- R1 = get_rotation_matrix(0.5*np.pi, [1,0,0])
- R2 = get_rotation_matrix(0.5*np.pi, [0,0,1])
+ R1 = get_rotation_matrix(0.5*np.pi, [1, 0, 0])
+ R2 = get_rotation_matrix(0.5*np.pi, [0, 0, 1])
uv = np.dot(R1, camera.unit_vectors)
uv = np.dot(R2, uv)
vectors.reshape((single_resolution_x*camera.resolution[1], 3))
diff -r 562b253b734a3abc182b712fb9009034ea3b36c6 -r 8f7e36cbf691d0f592c0d7c259f5eb1345d9adea yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -39,10 +39,9 @@
class RenderSource(ParallelAnalysisInterface):
- """
+ """Base Class for Render Sources.
- Base Class for Render Sources. Will be inherited for volumes,
- streamlines, etc.
+ Will be inherited for volumes, streamlines, etc.
"""
@@ -59,10 +58,9 @@
class OpaqueSource(RenderSource):
- """
+ """A base class for opaque render sources.
- A base class for opaque render sources. Will be inherited from
- for LineSources, BoxSources, etc.
+ Will be inherited from for LineSources, BoxSources, etc.
"""
def __init__(self):
@@ -72,50 +70,37 @@
def set_zbuffer(self, zbuffer):
self.zbuffer = zbuffer
- def render(self, camera, zbuffer=None):
- # This is definitely wrong for now
- if zbuffer is not None and self.zbuffer is not None:
- zbuffer.rgba = self.zbuffer.rgba
- zbuffer.z = self.zbuffer.z
- self.zbuffer = zbuffer
- return self.zbuffer
-
class VolumeSource(RenderSource):
+ """A class for rendering data from a volumetric data source
- """
+ Examples of such sources include a sphere, cylinder, or the
+ entire computational domain.
- A VolumeSource is a class for rendering data from
- an arbitrary volumetric data source, e.g. a sphere,
- cylinder, or the entire computational domain.
+ A :class:`VolumeSource` provides the framework to decompose an arbitrary
+ yt data source into bricks that can be traversed and volume rendered.
+ Parameters
+ ----------
+ data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+ This is the source to be rendered, which can be any arbitrary yt
+ data object or dataset.
+ fields : string
+ The name of the field(s) to be rendered.
+ auto: bool, optional
+ If True, will build a default AMRKDTree and transfer function based
+ on the data.
+
+ Examples
+ --------
+ >>> source = VolumeSource(ds.all_data(), 'density')
"""
_image = None
data_source = None
def __init__(self, data_source, field, auto=True):
- r"""Initialize a new volumetric source for rendering.
-
- A :class:`VolumeSource` provides the framework to decompose an arbitrary
- yt data source into bricks that can be traversed and volume rendered.
-
- Parameters
- ----------
- data_source: :class:`AMR3DData` or :class:`Dataset`, optional
- This is the source to be rendered, which can be any arbitrary yt
- data object or dataset.
- fields : string
- The name of the field(s) to be rendered.
- auto: bool, optional
- If True, will build a default AMRKDTree and transfer function based
- on the data.
-
- Examples
- --------
- >>> source = RenderSource(ds, 'density')
-
- """
+ r"""Initialize a new volumetric source for rendering."""
super(VolumeSource, self).__init__()
self.data_source = data_source_or_all(data_source)
field = self.data_source._determine_fields(field)[0]
@@ -138,13 +123,12 @@
self.build_defaults()
def build_defaults(self):
+ """Sets a default volume and transfer function"""
self.build_default_volume()
self.build_default_transfer_function()
def set_transfer_function(self, transfer_function):
- """
- Set transfer function for this source
- """
+ """Set transfer function for this source"""
if not isinstance(transfer_function,
(TransferFunction, ColorTransferFunction,
ProjectionTransferFunction)):
@@ -167,6 +151,7 @@
raise RuntimeError("Transfer Function not Supplied")
def build_default_transfer_function(self):
+ """Sets up a transfer function"""
self.tfh = \
TransferFunctionHelper(self.data_source.pf)
self.tfh.set_field(self.field)
@@ -175,6 +160,7 @@
self.transfer_function = self.tfh.tf
def build_default_volume(self):
+ """Sets up an AMRKDTree based on the VolumeSource's field"""
self.volume = AMRKDTree(self.data_source.pf,
data_source=self.data_source)
log_fields = [self.data_source.pf.field_info[self.field].take_log]
@@ -182,17 +168,23 @@
self.volume.set_fields([self.field], log_fields, True)
def set_volume(self, volume):
+ """Associates an AMRKDTree with the VolumeSource"""
assert(isinstance(volume, AMRKDTree))
del self.volume
self.volume = volume
- def set_field(self, field, no_ghost=True):
- field = self.data_source._determine_fields(field)[0]
- log_field = self.data_source.pf.field_info[field].take_log
- self.volume.set_fields(field, [log_field], no_ghost)
- self.field = field
+ def set_fields(self, fields, no_ghost=True):
+ """Set the source's fields to render
- def set_fields(self, fields, no_ghost=True):
+ Parameters
+    ----------
+ fields: field name or list of field names
+ The field or fields to render
+ no_ghost: boolean
+ If False, the AMRKDTree estimates vertex centered data using ghost
+ zones, which can eliminate seams in the resulting volume rendering.
+ Defaults to True for performance reasons.
+ """
fields = self.data_source._determine_fields(fields)
log_fields = [self.data_source.ds.field_info[f].take_log
for f in fields]
@@ -200,7 +192,12 @@
self.field = fields
def set_sampler(self, camera):
- """docstring for add_sampler"""
+ """Sets a volume render sampler
+
+ The type of sampler is determined based on the ``sampler_type`` attribute
+        of the VolumeSource. Currently the ``volume-render`` and ``projection``
+ sampler types are supported.
+ """
if self.sampler_type == 'volume-render':
sampler = new_volume_render_sampler(camera, self)
elif self.sampler_type == 'projection':
@@ -211,6 +208,24 @@
assert(self.sampler is not None)
def render(self, camera, zbuffer=None):
+ """Renders an image using the provided camera
+
+ Parameters
+ ----------
+ camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+ A volume rendering camera. Can be any type of camera.
+ zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+ A zbuffer array. This is used for opaque sources to determine the
+ z position of the source relative to other sources. Only useful if
+ you are manually calling render on multiple sources. Scene.render
+ uses this internally.
+
+ Returns
+ -------
+ A :class:`yt.data_objects.image_array.ImageArray` instance containing
+ the rendered image.
+
+ """
self.zbuffer = zbuffer
self.set_sampler(camera)
assert (self.sampler is not None)
@@ -238,11 +253,25 @@
return self.current_image
def finalize_image(self, camera, image, call_from_VR=False):
+ """Parallel reduce the image.
+
+ Parameters
+ ----------
+ camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+ The camera used to produce the volume rendering image.
+ image: :class:`yt.data_objects.image_array.ImageArray` instance
+ A reference to an image to fill
+        call_from_VR: boolean, optional
+ Whether or not this is being called from a higher level in the VR
+ interface. Used to set the correct orientation.
+ """
image = self.volume.reduce_tree_images(image,
camera.lens.viewpoint)
image.shape = camera.resolution[0], camera.resolution[1], 4
- # If the call is from VR, the image is rotated by 180 to get correct up dir
- if call_from_VR: image = np.rot90(image, k=2)
+        # If the call is from VR, the image is rotated by 180 degrees to get
+        # the correct up direction
+ if call_from_VR is True:
+ image = np.rot90(image, k=2)
if self.transfer_function.grey_opacity is False:
image[:, :, 3] = 1.0
return image
@@ -254,38 +283,33 @@
class MeshSource(RenderSource):
+ """A source for unstructured mesh data
- """
+ This functionality requires the embree ray-tracing engine and the
+ associated pyembree python bindings to be installed in order to
+ function.
- MeshSource is a class for volume rendering unstructured mesh
- data. This functionality requires the embree ray-tracing
- engine and the associated pyembree python bindings to be
- installed in order to function.
+ A :class:`MeshSource` provides the framework to volume render
+ unstructured mesh data.
+ Parameters
+ ----------
+ data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+ This is the source to be rendered, which can be any arbitrary yt
+ data object or dataset.
+ field : string
+ The name of the field to be rendered.
+
+ Examples
+ --------
+ >>> source = MeshSource(ds, ('all', 'convected'))
"""
_image = None
data_source = None
def __init__(self, data_source, field):
- r"""Initialize a new unstructured source for rendering.
-
- A :class:`MeshSource` provides the framework to volume render
- unstructured mesh data.
-
- Parameters
- ----------
- data_source: :class:`AMR3DData` or :class:`Dataset`, optional
- This is the source to be rendered, which can be any arbitrary yt
- data object or dataset.
- fields : string
- The name of the field to be rendered.
-
- Examples
- --------
- >>> source = MeshSource(ds, ('all', 'convected'))
-
- """
+ r"""Initialize a new unstructured source for rendering."""
super(MeshSource, self).__init__()
self.data_source = data_source_or_all(data_source)
field = self.data_source._determine_fields(field)[0]
@@ -323,7 +347,24 @@
field_data.d)
def render(self, camera, zbuffer=None):
+ """Renders an image using the provided camera
+ Parameters
+ ----------
+ camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+ A volume rendering camera. Can be any type of camera.
+ zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+ A zbuffer array. This is used for opaque sources to determine the
+ z position of the source relative to other sources. Only useful if
+ you are manually calling render on multiple sources. Scene.render
+ uses this internally.
+
+ Returns
+ -------
+ A :class:`yt.data_objects.image_array.ImageArray` instance containing
+ the rendered image.
+
+ """
self.sampler = new_mesh_sampler(camera, self)
mylog.debug("Casting rays")
@@ -340,33 +381,33 @@
class PointSource(OpaqueSource):
+ r"""A rendering source of opaque points in the scene.
+
+ This class provides a mechanism for adding points to a scene; these
+ points will be opaque, and can also be colored.
+
+ Parameters
+ ----------
+ positions: array, shape (N, 3)
+ These positions, in data-space coordinates, are the points to be
+ added to the scene.
+ colors : array, shape (N, 4), optional
+ The colors of the points, including an alpha channel, in floating
+ point running from 0..1.
+ color_stride : int, optional
+ The stride with which to access the colors when putting them on the
+ scene.
+
+ Examples
+ --------
+ >>> source = PointSource(particle_positions)
+
+ """
_image = None
data_source = None
def __init__(self, positions, colors=None, color_stride=1):
- r"""A rendering source of opaque points in the scene.
-
- This class provides a mechanism for adding points to a scene; these
- points will be opaque, and can also be colored.
-
- Parameters
- ----------
- positions: array, shape (N, 3)
- These positions, in data-space coordinates, are the points to be
- added to the scene.
- colors : array, shape (N, 4), optional
- The colors of the points, including an alpha channel, in floating
- point running from 0..1.
- color_stride : int, optional
- The stride with which to access the colors when putting them on the
- scene.
-
- Examples
- --------
- >>> source = PointSource(particle_positions)
-
- """
self.positions = positions
# If colors aren't individually set, make black with full opacity
if colors is None:
@@ -376,6 +417,24 @@
self.color_stride = color_stride
def render(self, camera, zbuffer=None):
+ """Renders an image using the provided camera
+
+ Parameters
+ ----------
+ camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+ A volume rendering camera. Can be any type of camera.
+ zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+ A zbuffer array. This is used for opaque sources to determine the
+ z position of the source relative to other sources. Only useful if
+ you are manually calling render on multiple sources. Scene.render
+ uses this internally.
+
+ Returns
+ -------
+ A :class:`yt.data_objects.image_array.ImageArray` instance containing
+ the rendered image.
+
+ """
vertices = self.positions
if zbuffer is None:
empty = camera.lens.new_image(camera)
@@ -401,39 +460,38 @@
class LineSource(OpaqueSource):
+ r"""A render source for a sequence of opaque line segments.
+
+ This class provides a mechanism for adding lines to a scene; these
+ points will be opaque, and can also be colored.
+
+ Parameters
+ ----------
+ positions: array, shape (N, 2, 3)
+ These positions, in data-space coordinates, are the starting and
+ stopping points for each pair of lines. For example,
+ positions[0][0] and positions[0][1] would give the (x, y, z)
+ coordinates of the beginning and end points of the first line,
+ respectively.
+ colors : array, shape (N, 4), optional
+ The colors of the points, including an alpha channel, in floating
+ point running from 0..1. Note that they correspond to the line
+ segment succeeding each point; this means that strictly speaking
+ they need only be (N-1) in length.
+ color_stride : int, optional
+ The stride with which to access the colors when putting them on the
+ scene.
+
+ Examples
+ --------
+ >>> source = LineSource(np.random.random((10, 2, 3)))
+
+ """
_image = None
data_source = None
def __init__(self, positions, colors=None, color_stride=1):
- r"""A render source for a sequence of opaque line segments.
-
- This class provides a mechanism for adding lines to a scene; these
- points will be opaque, and can also be colored.
-
- Parameters
- ----------
- positions: array, shape (N, 2, 3)
- These positions, in data-space coordinates, are the starting and
- stopping points for each pair of lines. For example,
- positions[0][0] and positions[0][1] would give the (x, y, z)
- coordinates of the beginning and end points of the first line,
- respectively.
- colors : array, shape (N, 4), optional
- The colors of the points, including an alpha channel, in floating
- point running from 0..1. Note that they correspond to the line
- segment succeeding each point; this means that strictly speaking
- they need only be (N-1) in length.
- color_stride : int, optional
- The stride with which to access the colors when putting them on the
- scene.
-
- Examples
- --------
- >>> source = LineSource(np.random.random((10, 3)))
-
- """
-
super(LineSource, self).__init__()
assert(positions.shape[1] == 2)
@@ -451,6 +509,24 @@
self.color_stride = color_stride
def render(self, camera, zbuffer=None):
+ """Renders an image using the provided camera
+
+ Parameters
+ ----------
+ camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+ A volume rendering camera. Can be any type of camera.
+ zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+ A zbuffer array. This is used for opaque sources to determine the
+ z position of the source relative to other sources. Only useful if
+ you are manually calling render on multiple sources. Scene.render
+ uses this internally.
+
+ Returns
+ -------
+ A :class:`yt.data_objects.image_array.ImageArray` instance containing
+ the rendered image.
+
+ """
vertices = self.positions
if zbuffer is None:
empty = camera.lens.new_image(camera)
@@ -476,26 +552,26 @@
class BoxSource(LineSource):
+ r"""A render source for a box drawn with line segments.
+
+ This render source will draw a box, with transparent faces, in data
+ space coordinates. This is useful for annotations.
+
+ Parameters
+ ----------
+ left_edge: array-like, shape (3,), float
+ The left edge coordinates of the box.
+ right_edge : array-like, shape (3,), float
+ The right edge coordinates of the box.
+ color : array-like, shape (4,), float, optional
+ The colors (including alpha) to use for the lines.
+
+ Examples
+ --------
+ >>> source = BoxSource(grid_obj.LeftEdge, grid_obj.RightEdge)
+
+ """
def __init__(self, left_edge, right_edge, color=None):
- r"""A render source for a box drawn with line segments.
-
- This render source will draw a box, with transparent faces, in data
- space coordinates. This is useful for annotations.
-
- Parameters
- ----------
- left_edge: array-like, shape (3,), float
- The left edge coordinates of the box.
- right_edge : array-like, shape (3,), float
- The right edge coordinates of the box.
- color : array-like, shape (4,), float, optional
- The colors (including alpha) to use for the lines.
-
- Examples
- --------
- >>> source = BoxSource(grid_obj.LeftEdge, grid_obj.RightEdge)
-
- """
if color is None:
color = np.array([1.0, 1.0, 1.0, 1.0])
color = ensure_numpy_array(color)
@@ -513,32 +589,32 @@
class GridSource(LineSource):
+ r"""A render source for drawing grids in a scene.
+
+ This render source will draw blocks that are within a given data
+ source, by default coloring them by their level of resolution.
+
+ Parameters
+ ----------
+ data_source: :class:`~yt.data_objects.api.DataContainer`
+ The data container that will be used to identify grids to draw.
+ alpha : float
+ The opacity of the grids to draw.
+ cmap : color map name
+ The color map to use to map resolution levels to color.
+ min_level : int, optional
+ Minimum level to draw
+ max_level : int, optional
+ Maximum level to draw
+
+ Examples
+ --------
+ >>> dd = ds.sphere("c", (0.1, "unitary"))
+ >>> source = GridSource(dd, alpha=1.0)
+
+ """
def __init__(self, data_source, alpha=0.3, cmap='algae',
min_level=None, max_level=None):
- r"""A render source for drawing grids in a scene.
-
- This render source will draw blocks that are within a given data
- source, by default coloring them by their level of resolution.
-
- Parameters
- ----------
- data_source: :class:`~yt.data_objects.api.DataContainer`
- The data container that will be used to identify grids to draw.
- alpha : float
- The opacity of the grids to draw.
- cmap : color map name
- The color map to use to map resolution levels to color.
- min_level : int, optional
- Minimum level to draw
- max_level : int, optional
- Maximum level to draw
-
- Examples
- --------
- >>> dd = ds.sphere("c", (0.1, "unitary"))
- >>> source = GridSource(dd, alpha=1.0)
-
- """
data_source = data_source_or_all(data_source)
corners = []
levels = []
@@ -586,24 +662,24 @@
class CoordinateVectorSource(OpaqueSource):
+ r"""Draw coordinate vectors on the scene.
+
+ This will draw a set of coordinate vectors on the camera image. They
+ will appear in the lower right of the image.
+
+ Parameters
+ ----------
+ colors: array-like, shape (3,4), optional
+ The x, y, z RGBA values to use to draw the vectors.
+ alpha : float, optional
+ The opacity of the vectors.
+
+ Examples
+ --------
+ >>> source = CoordinateVectorSource()
+
+ """
def __init__(self, colors=None, alpha=1.0):
- r"""Draw coordinate vectors on the scene.
-
- This will draw a set of coordinate vectors on the camera image. They
- will appear in the lower right of the image.
-
- Parameters
- ----------
- colors: array-like, shape (3,4), optional
- The x, y, z RGBA values to use to draw the vectors.
- alpha : float, optional
- The opacity of the vectors.
-
- Examples
- --------
- >>> source = CoordinateVectorSource()
-
- """
super(CoordinateVectorSource, self).__init__()
# If colors aren't individually set, make black with full opacity
if colors is None:
@@ -616,6 +692,24 @@
self.color_stride = 2
def render(self, camera, zbuffer=None):
+ """Renders an image using the provided camera
+
+ Parameters
+ ----------
+ camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+ A volume rendering camera. Can be any type of camera.
+ zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+ A zbuffer array. This is used for opaque sources to determine the
+ z position of the source relative to other sources. Only useful if
+ you are manually calling render on multiple sources. Scene.render
+ uses this internally.
+
+ Returns
+ -------
+ A :class:`yt.data_objects.image_array.ImageArray` instance containing
+ the rendered image.
+
+ """
camera.lens.setup_box_properties(camera)
center = camera.focus
# Get positions at the focus
diff -r 562b253b734a3abc182b712fb9009034ea3b36c6 -r 8f7e36cbf691d0f592c0d7c259f5eb1345d9adea yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -24,39 +24,38 @@
class Scene(object):
- """The Scene Class
+ """A virtual landscape for a volume rendering.
The Scene class is meant to be the primary container for the
new volume rendering framework. A single scene may contain
several Camera and RenderSource instances, and is the primary
driver behind creating a volume rendering.
+ This sets up the basics needed to add sources and cameras.
+ This does very little setup, and requires additional input
+ to do anything useful.
+
+ Parameters
+ ----------
+ None
+
+ Examples
+ --------
+ >>> sc = Scene()
+
"""
_current = None
_camera = None
def __init__(self):
- r"""Create a new Scene instance.
-
- This sets up the basics needed to add sources and cameras.
- This does very little setup, and requires additional input
- to do anything useful.
-
- Parameters
- ----------
- None
-
- Examples
- --------
- >>> sc = Scene()
-
- """
+ r"""Create a new Scene instance"""
super(Scene, self).__init__()
self.sources = OrderedDict()
self.camera = None
def get_source(self, source_num):
+ """Returns the volume rendering source indexed by ``source_num``"""
return list(itervalues(self.sources))[source_num]
def _iter_opaque_sources(self):
@@ -79,9 +78,18 @@
yield k, source
def add_source(self, render_source, keyname=None):
- """
- Add a render source to the scene. This will autodetect the
- type of source.
+ """Add a render source to the scene.
+
+ This will autodetect the type of source.
+
+ Parameters
+ ----------
+ render_source: an instance of :class:`yt.visualization.volume_rendering.render_source.RenderSource`
+ A source to contribute to the volume rendering scene.
+
+ keyname: string (optional)
+ The dictionary key used to reference the source in the sources
+ dictionary.
"""
if keyname is None:
keyname = 'source_%02i' % len(self.sources)
@@ -105,13 +113,13 @@
Image will be clipped before saving to the standard deviation
of the image multiplied by this value. Useful for enhancing
images. Default: None
- camera: :class:`Camera`, optional
+ camera: :class:`yt.visualization.volume_rendering.camera.Camera`, optional
If specified, use a different :class:`Camera` to render the scene.
Returns
-------
- bmp: :class:`ImageArray`
- ImageArray instance of the current rendering image.
+ A :class:`yt.data_objects.image_array.ImageArray` instance containing
+ the current rendering image.
Examples
--------
diff -r 562b253b734a3abc182b712fb9009034ea3b36c6 -r 8f7e36cbf691d0f592c0d7c259f5eb1345d9adea yt/visualization/volume_rendering/zbuffer_array.py
--- a/yt/visualization/volume_rendering/zbuffer_array.py
+++ b/yt/visualization/volume_rendering/zbuffer_array.py
@@ -12,13 +12,40 @@
#-----------------------------------------------------------------------------
-from yt.funcs import mylog
-from yt.data_objects.api import ImageArray
import numpy as np
class ZBuffer(object):
- """docstring for ZBuffer"""
+ """A container object for z-buffer arrays
+
+ A zbuffer is a companion array for an image that allows the volume rendering
+ infrastructure to determine whether one opaque source is in front of another
+ opaque source. The z buffer encodes the distance to the opaque source
+ relative to the camera position.
+
+ Parameters
+ ----------
+ rgba: NxNx4 image
+ The image the z buffer corresponds to
+ z: NxN image
+ The z depth of each pixel in the image. The shape of the image must be
+ the same as each RGBA channel in the original image.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> shape = (64, 64)
+ >>> b1 = ZBuffer(np.random.random(shape), np.ones(shape))
+ >>> b2 = ZBuffer(np.random.random(shape), np.zeros(shape))
+ >>> c = b1 + b2
+ >>> np.all(c.rgba == b2.rgba)
+ True
+ >>> np.all(c.z == b2.z)
+ True
+ >>> np.all(c == b2)
+ True
+
+ """
def __init__(self, rgba, z):
super(ZBuffer, self).__init__()
assert(rgba.shape[:len(z.shape)] == z.shape)
@@ -31,8 +58,8 @@
f = self.z < other.z
if self.z.shape[1] == 1:
# Non-rectangular
- rgba = (self.rgba * f[:,None,:])
- rgba += (other.rgba * (1.0 - f)[:,None,:])
+ rgba = (self.rgba * f[:, None, :])
+ rgba += (other.rgba * (1.0 - f)[:, None, :])
else:
b = self.z > other.z
rgba = np.empty(self.rgba.shape)
https://bitbucket.org/yt_analysis/yt/commits/01ad7b75d4e4/
Changeset: 01ad7b75d4e4
Branch: yt
User: ngoldbaum
Date: 2015-10-16 05:13:45+00:00
Summary: Remove expand_factor and an outdated comment
Affected #: 1 file
diff -r 8f7e36cbf691d0f592c0d7c259f5eb1345d9adea -r 01ad7b75d4e4a840b524638b0a86ec02a6ee714a yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -141,7 +141,6 @@
def __init__(self):
super(PerspectiveLens, self).__init__()
- self.expand_factor = 1.5
def new_image(self, camera):
self.current_image = ImageArray(
@@ -151,13 +150,6 @@
return self.current_image
def _get_sampler_params(self, camera, render_source):
- # We should move away from pre-generation of vectors like this and into
- # the usage of on-the-fly generation in the VolumeIntegrator module
- # We might have a different width and back_center
- # dl = (self.back_center - self.front_center)
- # self.front_center += self.expand_factor*dl
- # self.back_center -= dl
-
if render_source.zbuffer is not None:
image = render_source.zbuffer.rgba
else:
@@ -263,7 +255,6 @@
def __init__(self):
super(StereoPerspectiveLens, self).__init__()
- self.expand_factor = 1.5
self.disparity = None
def new_image(self, camera):
https://bitbucket.org/yt_analysis/yt/commits/90f24d63f7ac/
Changeset: 90f24d63f7ac
Branch: yt
User: ngoldbaum
Date: 2015-10-16 05:14:29+00:00
Summary: Note that the zbuffer RGBA and z arrays can be MxN instead of NxN
Affected #: 1 file
diff -r 01ad7b75d4e4a840b524638b0a86ec02a6ee714a -r 90f24d63f7acf41089847b5c426ddac7e21e7f6c yt/visualization/volume_rendering/zbuffer_array.py
--- a/yt/visualization/volume_rendering/zbuffer_array.py
+++ b/yt/visualization/volume_rendering/zbuffer_array.py
@@ -25,9 +25,9 @@
Parameters
----------
- rgba: NxNx4 image
+ rgba: MxNx4 image
The image the z buffer corresponds to
- z: NxN image
+ z: MxN image
The z depth of each pixel in the image. The shape of the image must be
the same as each RGBA channel in the original image.
https://bitbucket.org/yt_analysis/yt/commits/1a484c166e77/
Changeset: 1a484c166e77
Branch: yt
User: chummels
Date: 2015-10-16 06:08:42+00:00
Summary: Merged in ngoldbaum/yt (pull request #1805)
Add and expand docstrings for all classes and member functions that were missing
Affected #: 6 files
diff -r 00fd411044ba420d06186ebaba2e040e77836f93 -r 1a484c166e77a29f51c9b0c9ab4890a3398de543 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -29,16 +29,15 @@
latex_symbol_lut, unit_prefixes, \
prefixable_units, cgs_base_units, \
mks_base_units, latex_prefixes, yt_base_units
-from yt.units.unit_registry import UnitRegistry
+from yt.units.unit_registry import \
+ UnitRegistry, \
+ UnitParseError
from yt.utilities.exceptions import YTUnitsNotReducible
import copy
import string
import token
-class UnitParseError(Exception):
- pass
-
class InvalidUnitOperation(Exception):
pass
diff -r 00fd411044ba420d06186ebaba2e040e77836f93 -r 1a484c166e77a29f51c9b0c9ab4890a3398de543 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -14,6 +14,7 @@
from yt.funcs import iterable, mylog, ensure_numpy_array
from yt.utilities.orientation import Orientation
from yt.units.yt_array import YTArray
+from yt.units.unit_registry import UnitParseError
from yt.utilities.math_utils import get_rotation_matrix
from .utils import data_source_or_all
from .lens import lenses
@@ -22,16 +23,34 @@
class Camera(Orientation):
- r"""
+ r"""A representation of a point of view into a Scene.
- The Camera class. A Camera represents of point of view into a
- Scene. It is defined by a position (the location of the camera
+ It is defined by a position (the location of the camera
in the simulation domain,), a focus (the point at which the
camera is pointed), a width (the width of the snapshot that will
be taken, a resolution (the number of pixels in the image), and
a north_vector (the "up" direction in the resulting image). A
camera can use a variety of different Lens objects.
+ Parameters
+ ----------
+ data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+ This is the source to be rendered, which can be any arbitrary yt
+ data object or dataset.
+ lens_type: string, optional
+ This specifies the type of lens to use for rendering. Current
+ options are 'plane-parallel', 'perspective', and 'fisheye'. See
+ :class:`yt.visualization.volume_rendering.lens.Lens` for details.
+ Default: 'plane-parallel'
+ auto: boolean
+ If True, build smart defaults using the data source extent. This
+ can be time-consuming to iterate over the entire dataset to find
+ the positional bounds. Default: False
+
+ Examples
+ --------
+ >>> cam = Camera(ds)
+
"""
_moved = True
@@ -42,29 +61,7 @@
def __init__(self, data_source=None, lens_type='plane-parallel',
auto=False):
- """
- Initialize a Camera Instance
-
- Parameters
- ----------
- data_source: :class:`AMR3DData` or :class:`Dataset`, optional
- This is the source to be rendered, which can be any arbitrary yt
- data object or dataset.
- lens_type: string, optional
- This specifies the type of lens to use for rendering. Current
- options are 'plane-parallel', 'perspective', and 'fisheye'. See
- :class:`yt.visualization.volume_rendering.lens.Lens` for details.
- Default: 'plane-parallel'
- auto: boolean
- If True, build smart defaults using the data source extent. This
- can be time-consuming to iterate over the entire dataset to find
- the positional bounds. Default: False
-
- Examples
- --------
- >>> cam = Camera(ds)
-
- """
+ """Initialize a Camera Instance"""
self.lens = None
self.north_vector = None
self.normal_vector = None
@@ -178,9 +175,7 @@
return lens_params
def set_lens(self, lens_type):
- r'''
-
- Set the lens to be used with this camera.
+ r"""Set the lens to be used with this camera.
Parameters
----------
@@ -194,7 +189,7 @@
'spherical'
'stereo-spherical'
- '''
+ """
if lens_type not in lenses:
mylog.error("Lens type not available")
raise RuntimeError()
@@ -202,6 +197,7 @@
self.lens.camera = self
def set_defaults_from_data_source(self, data_source):
+ """Resets the camera attributes to their default values"""
self.position = data_source.pf.domain_right_edge
width = 1.5 * data_source.pf.domain_width.max()
@@ -232,20 +228,22 @@
self._moved = True
def set_width(self, width):
- r"""
-
- Set the width of the image that will be produced by this camera.
- This must be a YTQuantity.
+ r"""Set the width of the image that will be produced by this camera.
Parameters
----------
- width : :class:`yt.units.yt_array.YTQuantity`
-
+ width : YTQuantity or 3 element YTArray
+ The width of the volume rendering in the horizontal, vertical, and
+ depth directions. If a scalar, assumes that the width is the same in
+ all three directions.
"""
- assert isinstance(width, YTArray), 'Width must be created with ds.arr'
- if isinstance(width, YTArray):
+ try:
width = width.in_units('code_length')
+ except (AttributeError, UnitParseError):
+ raise ValueError(
+ 'Volume rendering width must be a YTArray that can be '
+ 'converted to code units')
if not iterable(width):
width = YTArray([width.d]*3, width.units) # Can't get code units.
@@ -253,9 +251,7 @@
self.switch_orientation()
def set_position(self, position, north_vector=None):
- r"""
-
- Set the position of the camera.
+ r"""Set the position of the camera.
Parameters
----------
@@ -273,8 +269,7 @@
north_vector=north_vector)
def switch_orientation(self, normal_vector=None, north_vector=None):
- r"""
- Change the view direction based on any of the orientation parameters.
+ r"""Change the view direction based on any of the orientation parameters.
This will recalculate all the necessary vectors and vector planes
related to an orientable object.
@@ -507,11 +502,6 @@
factor : float
The factor by which to reduce the distance to the focal point.
-
- Notes
- -----
-
- You will need to call snapshot() again to get a new image.
"""
self.set_width(self.width / factor)
diff -r 00fd411044ba420d06186ebaba2e040e77836f93 -r 1a484c166e77a29f51c9b0c9ab4890a3398de543 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -26,15 +26,7 @@
class Lens(ParallelAnalysisInterface):
-
- """
-
- A base class for setting up Lens objects. A Lens,
- along with a Camera, is used to defined the set of
- rays that will be used for rendering.
-
- """
-
+ """A Lens is used to define the set of rays for rendering."""
def __init__(self, ):
super(Lens, self).__init__()
self.viewpoint = None
@@ -48,9 +40,14 @@
self.sampler = None
def set_camera(self, camera):
+ """Set the properties of the lens based on the camera.
+
+ This is a proxy for setup_box_properties
+ """
self.setup_box_properties(camera)
def new_image(self, camera):
+ """Initialize a new ImageArray to be used with this lens."""
self.current_image = ImageArray(
np.zeros((camera.resolution[0], camera.resolution[1],
4), dtype='float64', order='C'),
@@ -58,6 +55,7 @@
return self.current_image
def setup_box_properties(self, camera):
+ """Set up the view and stage based on the properties of the camera."""
unit_vectors = camera.unit_vectors
width = camera.width
center = camera.focus
@@ -80,13 +78,12 @@
class PlaneParallelLens(Lens):
+ r"""The lens for orthographic projections.
- r'''
-
- This lens type is the standard type used for orthographic projections.
All rays emerge parallel to each other, arranged along a plane.
- '''
+ The initializer takes no parameters.
+ """
def __init__(self, ):
super(PlaneParallelLens, self).__init__()
@@ -111,6 +108,7 @@
return sampler_params
def set_viewpoint(self, camera):
+ """Set the viewpoint based on the camera"""
# This is a hack that should be replaced by an alternate plane-parallel
# traversal. Put the camera really far away so that the effective
# viewpoint is infinitely far away, making for parallel rays.
@@ -135,17 +133,14 @@
class PerspectiveLens(Lens):
+ r"""A lens for viewing a scene with a set of rays within an opening angle.
- r'''
-
- This lens type adjusts for an opening view angle, so that the scene will
- have an element of perspective to it.
-
- '''
+ The scene will have an element of perspective to it since the rays are not
+ parallel.
+ """
def __init__(self):
super(PerspectiveLens, self).__init__()
- self.expand_factor = 1.5
def new_image(self, camera):
self.current_image = ImageArray(
@@ -155,13 +150,6 @@
return self.current_image
def _get_sampler_params(self, camera, render_source):
- # We should move away from pre-generation of vectors like this and into
- # the usage of on-the-fly generation in the VolumeIntegrator module
- # We might have a different width and back_center
- # dl = (self.back_center - self.front_center)
- # self.front_center += self.expand_factor*dl
- # self.back_center -= dl
-
if render_source.zbuffer is not None:
image = render_source.zbuffer.rgba
else:
@@ -174,24 +162,30 @@
px = np.mat(np.linspace(-.5, .5, camera.resolution[0]))
py = np.mat(np.linspace(-.5, .5, camera.resolution[1]))
- sample_x = camera.width[0] * np.array(east_vec.reshape(3,1) * px).transpose()
- sample_y = camera.width[1] * np.array(north_vec.reshape(3,1) * py).transpose()
+ sample_x = camera.width[0] * np.array(east_vec.reshape(3, 1) * px)
+ sample_x = sample_x.transpose()
+ sample_y = camera.width[1] * np.array(north_vec.reshape(3, 1) * py)
+ sample_y = sample_y.transpose()
vectors = np.zeros((camera.resolution[0], camera.resolution[1], 3),
dtype='float64', order='C')
- sample_x = np.repeat(sample_x.reshape(camera.resolution[0],1,3), \
+ sample_x = np.repeat(sample_x.reshape(camera.resolution[0], 1, 3),
camera.resolution[1], axis=1)
- sample_y = np.repeat(sample_y.reshape(1,camera.resolution[1],3), \
+ sample_y = np.repeat(sample_y.reshape(1, camera.resolution[1], 3),
camera.resolution[0], axis=0)
- normal_vecs = np.tile(normal_vec, camera.resolution[0] * camera.resolution[1])\
- .reshape(camera.resolution[0], camera.resolution[1], 3)
+ normal_vecs = np.tile(
+ normal_vec, camera.resolution[0] * camera.resolution[1])
+ normal_vecs = normal_vecs.reshape(
+ camera.resolution[0], camera.resolution[1], 3)
vectors = sample_x + sample_y + normal_vecs * camera.width[2]
- positions = np.tile(camera.position, camera.resolution[0] * camera.resolution[1])\
- .reshape(camera.resolution[0], camera.resolution[1], 3)
+ positions = np.tile(
+ camera.position, camera.resolution[0] * camera.resolution[1])
+ positions = positions.reshape(
+ camera.resolution[0], camera.resolution[1], 3)
uv = np.ones(3, dtype='float64')
@@ -234,11 +228,12 @@
if np.arccos(sight_angle_cos) < 0.5 * np.pi:
sight_length = camera.width[2] / sight_angle_cos
else:
- # If the corner is on the backwards, then we put it outside of the image
- # It can not be simply removed because it may connect to other corner
- # within the image, which produces visible domain boundary line
- sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2) / \
- np.sqrt(1 - sight_angle_cos**2)
+ # If the corner is behind the camera, we put it outside of
+ # the image. It cannot simply be removed, because it may connect
+ # to another corner within the image, which would produce a
+ # visible domain boundary line.
+ sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2)
+ sight_length = sight_length / np.sqrt(1 - sight_angle_cos**2)
pos1[i] = camera.position + sight_length * sight_vector[i]
dx = np.dot(pos1 - sight_center.d, camera.unit_vectors[0])
@@ -256,15 +251,14 @@
class StereoPerspectiveLens(Lens):
-
- """docstring for StereoPerspectiveLens"""
+ """A lens that includes two sources for perspective rays, for 3D viewing"""
def __init__(self):
super(StereoPerspectiveLens, self).__init__()
- self.expand_factor = 1.5
self.disparity = None
def new_image(self, camera):
+ """Initialize a new ImageArray to be used with this lens."""
self.current_image = ImageArray(
np.zeros((camera.resolution[0]*camera.resolution[1], 1,
4), dtype='float64', order='C'),
@@ -275,10 +269,6 @@
# We should move away from pre-generation of vectors like this and into
# the usage of on-the-fly generation in the VolumeIntegrator module
# We might have a different width and back_center
- # dl = (self.back_center - self.front_center)
- # self.front_center += self.expand_factor*dl
- # self.back_center -= dl
-
if self.disparity is None:
self.disparity = camera.width[0] / 2.e3
@@ -287,8 +277,10 @@
else:
image = self.new_image(camera)
- vectors_left, positions_left = self._get_positions_vectors(camera, -self.disparity)
- vectors_right, positions_right = self._get_positions_vectors(camera, self.disparity)
+ vectors_left, positions_left = self._get_positions_vectors(
+ camera, -self.disparity)
+ vectors_right, positions_right = self._get_positions_vectors(
+ camera, self.disparity)
uv = np.ones(3, dtype='float64')
@@ -330,28 +322,37 @@
px = np.mat(np.linspace(-.5, .5, single_resolution_x))
py = np.mat(np.linspace(-.5, .5, camera.resolution[1]))
- sample_x = camera.width[0] * np.array(east_vec_rot.reshape(3,1) * px).transpose()
- sample_y = camera.width[1] * np.array(north_vec.reshape(3,1) * py).transpose()
+ sample_x = camera.width[0] * np.array(east_vec_rot.reshape(3, 1) * px)
+ sample_x = sample_x.transpose()
+ sample_y = camera.width[1] * np.array(north_vec.reshape(3, 1) * py)
+ sample_y = sample_y.transpose()
vectors = np.zeros((single_resolution_x, camera.resolution[1], 3),
dtype='float64', order='C')
- sample_x = np.repeat(sample_x.reshape(single_resolution_x,1,3), \
+ sample_x = np.repeat(sample_x.reshape(single_resolution_x, 1, 3),
camera.resolution[1], axis=1)
- sample_y = np.repeat(sample_y.reshape(1,camera.resolution[1],3), \
+ sample_y = np.repeat(sample_y.reshape(1, camera.resolution[1], 3),
single_resolution_x, axis=0)
- normal_vecs = np.tile(normal_vec_rot, single_resolution_x * camera.resolution[1])\
- .reshape(single_resolution_x, camera.resolution[1], 3)
- east_vecs = np.tile(east_vec_rot, single_resolution_x * camera.resolution[1])\
- .reshape(single_resolution_x, camera.resolution[1], 3)
+ normal_vecs = np.tile(
+ normal_vec_rot, single_resolution_x * camera.resolution[1])
+ normal_vecs = normal_vecs.reshape(
+ single_resolution_x, camera.resolution[1], 3)
+ east_vecs = np.tile(
+ east_vec_rot, single_resolution_x * camera.resolution[1])
+ east_vecs = east_vecs.reshape(
+ single_resolution_x, camera.resolution[1], 3)
vectors = sample_x + sample_y + normal_vecs * camera.width[2]
- positions = np.tile(camera.position, single_resolution_x * camera.resolution[1])\
- .reshape(single_resolution_x, camera.resolution[1], 3)
+ positions = np.tile(
+ camera.position, single_resolution_x * camera.resolution[1])
+ positions = positions.reshape(
+ single_resolution_x, camera.resolution[1], 3)
- positions = positions + east_vecs * disparity # Here the east_vecs is non-rotated one
+ # Here the east_vecs is non-rotated one
+ positions = positions + east_vecs * disparity
mylog.debug(positions)
mylog.debug(vectors)
@@ -365,8 +366,10 @@
if self.disparity is None:
self.disparity = camera.width[0] / 2.e3
- px_left, py_left, dz_left = self._get_px_py_dz(camera, pos, res, -self.disparity)
- px_right, py_right, dz_right = self._get_px_py_dz(camera, pos, res, self.disparity)
+ px_left, py_left, dz_left = self._get_px_py_dz(
+ camera, pos, res, -self.disparity)
+ px_right, py_right, dz_right = self._get_px_py_dz(
+ camera, pos, res, self.disparity)
px = np.hstack([px_left, px_right])
py = np.hstack([py_left, py_right])
@@ -402,16 +405,18 @@
if np.arccos(sight_angle_cos) < 0.5 * np.pi:
sight_length = camera.width[2] / sight_angle_cos
else:
- # If the corner is on the backwards, then we put it outside of the image
- # It can not be simply removed because it may connect to other corner
- # within the image, which produces visible domain boundary line
- sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2) / \
- np.sqrt(1 - sight_angle_cos**2)
+ # If the corner is on the backwards, then we put it outside of
+ # the image. It can not be simply removed because it may connect
+ # to other corner within the image, which produces visible
+ # domain boundary line
+ sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2)
+ sight_length = sight_length / np.sqrt(1 - sight_angle_cos**2)
pos1[i] = camera_position_shift + sight_length * sight_vector[i]
dx = np.dot(pos1 - sight_center.d, east_vec_rot)
dy = np.dot(pos1 - sight_center.d, north_vec)
dz = np.dot(pos1 - sight_center.d, normal_vec_rot)
+
# Transpose into image coords.
px = (res0_h * 0.5 + res0_h / camera.width[0].d * dx).astype('int')
py = (res[1] * 0.5 + res[1] / camera.width[1].d * dy).astype('int')
@@ -431,14 +436,13 @@
class FisheyeLens(Lens):
+ r"""A lens for dome-based renderings
- r"""
-
- This lens type accepts a field-of-view property, fov, that describes how wide
- an angle the fisheye can see. Fisheye images are typically used for dome-based
- presentations; the Hayden planetarium for instance has a field of view of 194.6.
- The images returned by this camera will be flat pixel images that can and should
- be reshaped to the resolution.
+ This lens type accepts a field-of-view property, fov, that describes how
+ wide an angle the fisheye can see. Fisheye images are typically used for
+ dome-based presentations; the Hayden planetarium for instance has a field of
+ view of 194.6. The images returned by this camera will be flat pixel images
+ that can and should be reshaped to the resolution.
"""
@@ -450,11 +454,13 @@
self.rotation_matrix = np.eye(3)
def setup_box_properties(self, camera):
+ """Set up the view and stage based on the properties of the camera."""
self.radius = camera.width.max()
super(FisheyeLens, self).setup_box_properties(camera)
self.set_viewpoint(camera)
def new_image(self, camera):
+ """Initialize a new ImageArray to be used with this lens."""
self.current_image = ImageArray(
np.zeros((camera.resolution[0]**2, 1,
4), dtype='float64', order='C'),
@@ -489,9 +495,7 @@
return sampler_params
def set_viewpoint(self, camera):
- """
- For a FisheyeLens, the viewpoint is the front center.
- """
+ """For a FisheyeLens, the viewpoint is the camera's position"""
self.viewpoint = camera.position
def __repr__(self):
@@ -530,12 +534,11 @@
class SphericalLens(Lens):
+ r"""A lens for cylindrical-spherical projection.
- r"""
+ Movies rendered in this way can be displayed in head-tracking devices or
+ in YouTube 360 view.
- This is a cylindrical-spherical projection. Movies rendered in this way
- can be displayed in head-tracking devices or in YouTube 360 view.
-
"""
def __init__(self):
@@ -545,6 +548,7 @@
self.rotation_matrix = np.eye(3)
def setup_box_properties(self, camera):
+ """Set up the view and stage based on the properties of the camera."""
self.radius = camera.width.max()
super(SphericalLens, self).setup_box_properties(camera)
self.set_viewpoint(camera)
@@ -562,11 +566,13 @@
vectors[:, :, 2] = np.sin(py)
vectors = vectors * camera.width[0]
- positions = np.tile(camera.position, camera.resolution[0] * camera.resolution[1])\
- .reshape(camera.resolution[0], camera.resolution[1], 3)
+ positions = np.tile(
+ camera.position,
+ camera.resolution[0] * camera.resolution[1]).reshape(
+ camera.resolution[0], camera.resolution[1], 3)
- R1 = get_rotation_matrix(0.5*np.pi, [1,0,0])
- R2 = get_rotation_matrix(0.5*np.pi, [0,0,1])
+ R1 = get_rotation_matrix(0.5*np.pi, [1, 0, 0])
+ R2 = get_rotation_matrix(0.5*np.pi, [0, 0, 1])
uv = np.dot(R1, camera.unit_vectors)
uv = np.dot(R2, uv)
vectors.reshape((camera.resolution[0]*camera.resolution[1], 3))
@@ -595,9 +601,7 @@
return sampler_params
def set_viewpoint(self, camera):
- """
- For a PerspectiveLens, the viewpoint is the front center.
- """
+ """For a SphericalLens, the viewpoint is the camera's position"""
self.viewpoint = camera.position
def project_to_plane(self, camera, pos, res=None):
@@ -631,8 +635,11 @@
class StereoSphericalLens(Lens):
+ r"""A lens for a stereo cylindrical-spherical projection.
- """docstring for StereoSphericalLens"""
+ Movies rendered in this way can be displayed in VR devices or stereo YouTube
+ 360 degree movies.
+ """
def __init__(self):
super(StereoSphericalLens, self).__init__()
@@ -651,31 +658,35 @@
self.disparity = camera.width[0] / 1000.
single_resolution_x = np.floor(camera.resolution[0])/2
- px = np.linspace(-np.pi, np.pi, single_resolution_x, endpoint=True)[:,None]
- py = np.linspace(-np.pi/2., np.pi/2., camera.resolution[1], endpoint=True)[None,:]
+ px = np.linspace(-np.pi, np.pi, single_resolution_x,
+ endpoint=True)[:, None]
+ py = np.linspace(-np.pi/2., np.pi/2., camera.resolution[1],
+ endpoint=True)[None, :]
vectors = np.zeros((single_resolution_x, camera.resolution[1], 3),
dtype='float64', order='C')
- vectors[:,:,0] = np.cos(px) * np.cos(py)
- vectors[:,:,1] = np.sin(px) * np.cos(py)
- vectors[:,:,2] = np.sin(py)
+ vectors[:, :, 0] = np.cos(px) * np.cos(py)
+ vectors[:, :, 1] = np.sin(px) * np.cos(py)
+ vectors[:, :, 2] = np.sin(py)
vectors = vectors * camera.width[0]
vectors2 = np.zeros((single_resolution_x, camera.resolution[1], 3),
dtype='float64', order='C')
- vectors2[:,:,0] = -np.sin(px) * np.ones((1, camera.resolution[1]))
- vectors2[:,:,1] = np.cos(px) * np.ones((1, camera.resolution[1]))
- vectors2[:,:,2] = 0
+ vectors2[:, :, 0] = -np.sin(px) * np.ones((1, camera.resolution[1]))
+ vectors2[:, :, 1] = np.cos(px) * np.ones((1, camera.resolution[1]))
+ vectors2[:, :, 2] = 0
- positions = np.tile(camera.position, single_resolution_x * camera.resolution[1])\
- .reshape(single_resolution_x, camera.resolution[1], 3)
+ positions = np.tile(
+ camera.position, single_resolution_x * camera.resolution[1])
+ positions = positions.reshape(
+ single_resolution_x, camera.resolution[1], 3)
# The left and right are switched here since VR is in LHS.
positions_left = positions + vectors2 * self.disparity
positions_right = positions + vectors2 * (-self.disparity)
- R1 = get_rotation_matrix(0.5*np.pi, [1,0,0])
- R2 = get_rotation_matrix(0.5*np.pi, [0,0,1])
+ R1 = get_rotation_matrix(0.5*np.pi, [1, 0, 0])
+ R2 = get_rotation_matrix(0.5*np.pi, [0, 0, 1])
uv = np.dot(R1, camera.unit_vectors)
uv = np.dot(R2, uv)
vectors.reshape((single_resolution_x*camera.resolution[1], 3))
diff -r 00fd411044ba420d06186ebaba2e040e77836f93 -r 1a484c166e77a29f51c9b0c9ab4890a3398de543 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -39,10 +39,9 @@
class RenderSource(ParallelAnalysisInterface):
- """
+ """Base Class for Render Sources.
- Base Class for Render Sources. Will be inherited for volumes,
- streamlines, etc.
+ Will be inherited for volumes, streamlines, etc.
"""
@@ -59,10 +58,9 @@
class OpaqueSource(RenderSource):
- """
+ """A base class for opaque render sources.
- A base class for opaque render sources. Will be inherited from
- for LineSources, BoxSources, etc.
+ Will be inherited from for LineSources, BoxSources, etc.
"""
def __init__(self):
@@ -72,50 +70,37 @@
def set_zbuffer(self, zbuffer):
self.zbuffer = zbuffer
- def render(self, camera, zbuffer=None):
- # This is definitely wrong for now
- if zbuffer is not None and self.zbuffer is not None:
- zbuffer.rgba = self.zbuffer.rgba
- zbuffer.z = self.zbuffer.z
- self.zbuffer = zbuffer
- return self.zbuffer
-
class VolumeSource(RenderSource):
+ """A class for rendering data from a volumetric data source
- """
+ Examples of such sources include a sphere, cylinder, or the
+ entire computational domain.
- A VolumeSource is a class for rendering data from
- an arbitrary volumetric data source, e.g. a sphere,
- cylinder, or the entire computational domain.
+ A :class:`VolumeSource` provides the framework to decompose an arbitrary
+ yt data source into bricks that can be traversed and volume rendered.
+ Parameters
+ ----------
+ data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+ This is the source to be rendered, which can be any arbitrary yt
+ data object or dataset.
+ fields : string
+ The name of the field(s) to be rendered.
+ auto: bool, optional
+ If True, will build a default AMRKDTree and transfer function based
+ on the data.
+
+ Examples
+ --------
+ >>> source = VolumeSource(ds.all_data(), 'density')
"""
_image = None
data_source = None
def __init__(self, data_source, field, auto=True):
- r"""Initialize a new volumetric source for rendering.
-
- A :class:`VolumeSource` provides the framework to decompose an arbitrary
- yt data source into bricks that can be traversed and volume rendered.
-
- Parameters
- ----------
- data_source: :class:`AMR3DData` or :class:`Dataset`, optional
- This is the source to be rendered, which can be any arbitrary yt
- data object or dataset.
- fields : string
- The name of the field(s) to be rendered.
- auto: bool, optional
- If True, will build a default AMRKDTree and transfer function based
- on the data.
-
- Examples
- --------
- >>> source = RenderSource(ds, 'density')
-
- """
+ r"""Initialize a new volumetric source for rendering."""
super(VolumeSource, self).__init__()
self.data_source = data_source_or_all(data_source)
field = self.data_source._determine_fields(field)[0]
@@ -138,13 +123,12 @@
self.build_defaults()
def build_defaults(self):
+ """Sets a default volume and transfer function"""
self.build_default_volume()
self.build_default_transfer_function()
def set_transfer_function(self, transfer_function):
- """
- Set transfer function for this source
- """
+ """Set transfer function for this source"""
if not isinstance(transfer_function,
(TransferFunction, ColorTransferFunction,
ProjectionTransferFunction)):
@@ -167,6 +151,7 @@
raise RuntimeError("Transfer Function not Supplied")
def build_default_transfer_function(self):
+ """Sets up a transfer function"""
self.tfh = \
TransferFunctionHelper(self.data_source.pf)
self.tfh.set_field(self.field)
@@ -175,6 +160,7 @@
self.transfer_function = self.tfh.tf
def build_default_volume(self):
+ """Sets up an AMRKDTree based on the VolumeSource's field"""
self.volume = AMRKDTree(self.data_source.pf,
data_source=self.data_source)
log_fields = [self.data_source.pf.field_info[self.field].take_log]
@@ -182,17 +168,23 @@
self.volume.set_fields([self.field], log_fields, True)
def set_volume(self, volume):
+ """Associates an AMRKDTree with the VolumeSource"""
assert(isinstance(volume, AMRKDTree))
del self.volume
self.volume = volume
- def set_field(self, field, no_ghost=True):
- field = self.data_source._determine_fields(field)[0]
- log_field = self.data_source.pf.field_info[field].take_log
- self.volume.set_fields(field, [log_field], no_ghost)
- self.field = field
+ def set_fields(self, fields, no_ghost=True):
+ """Set the source's fields to render
- def set_fields(self, fields, no_ghost=True):
+ Parameters
+ ----------
+ fields: field name or list of field names
+ The field or fields to render
+ no_ghost: boolean
+ If False, the AMRKDTree estimates vertex centered data using ghost
+ zones, which can eliminate seams in the resulting volume rendering.
+ Defaults to True for performance reasons.
+ """
fields = self.data_source._determine_fields(fields)
log_fields = [self.data_source.ds.field_info[f].take_log
for f in fields]
@@ -200,7 +192,12 @@
self.field = fields
def set_sampler(self, camera):
- """docstring for add_sampler"""
+ """Sets a volume render sampler
+
+ The type of sampler is determined based on the ``sampler_type`` attribute
+ of the VolumeSource. Currently the ``volume_render`` and ``projection``
+ sampler types are supported.
+ """
if self.sampler_type == 'volume-render':
sampler = new_volume_render_sampler(camera, self)
elif self.sampler_type == 'projection':
@@ -211,6 +208,24 @@
assert(self.sampler is not None)
def render(self, camera, zbuffer=None):
+ """Renders an image using the provided camera
+
+ Parameters
+ ----------
+ camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+ A volume rendering camera. Can be any type of camera.
+ zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+ A zbuffer array. This is used for opaque sources to determine the
+ z position of the source relative to other sources. Only useful if
+ you are manually calling render on multiple sources. Scene.render
+ uses this internally.
+
+ Returns
+ -------
+ A :class:`yt.data_objects.image_array.ImageArray` instance containing
+ the rendered image.
+
+ """
self.zbuffer = zbuffer
self.set_sampler(camera)
assert (self.sampler is not None)
@@ -238,11 +253,25 @@
return self.current_image
def finalize_image(self, camera, image, call_from_VR=False):
+ """Parallel reduce the image.
+
+ Parameters
+ ----------
+ camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+ The camera used to produce the volume rendering image.
+ image: :class:`yt.data_objects.image_array.ImageArray` instance
+ A reference to an image to fill
+ call_from_VR: boolean, optional
+ Whether or not this is being called from a higher level in the VR
+ interface. Used to set the correct orientation.
+ """
image = self.volume.reduce_tree_images(image,
camera.lens.viewpoint)
image.shape = camera.resolution[0], camera.resolution[1], 4
- # If the call is from VR, the image is rotated by 180 to get correct up dir
- if call_from_VR: image = np.rot90(image, k=2)
+ # If the call is from VR, the image is rotated by 180 to get correct
+ # up direction
+ if call_from_VR is True:
+ image = np.rot90(image, k=2)
if self.transfer_function.grey_opacity is False:
image[:, :, 3] = 1.0
return image
@@ -254,38 +283,33 @@
class MeshSource(RenderSource):
+ """A source for unstructured mesh data
- """
+ This functionality requires the embree ray-tracing engine and the
+ associated pyembree python bindings to be installed in order to
+ function.
- MeshSource is a class for volume rendering unstructured mesh
- data. This functionality requires the embree ray-tracing
- engine and the associated pyembree python bindings to be
- installed in order to function.
+ A :class:`MeshSource` provides the framework to volume render
+ unstructured mesh data.
+ Parameters
+ ----------
+ data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+ This is the source to be rendered, which can be any arbitrary yt
+ data object or dataset.
+ field : string
+ The name of the field to be rendered.
+
+ Examples
+ --------
+ >>> source = MeshSource(ds, ('all', 'convected'))
"""
_image = None
data_source = None
def __init__(self, data_source, field):
- r"""Initialize a new unstructured source for rendering.
-
- A :class:`MeshSource` provides the framework to volume render
- unstructured mesh data.
-
- Parameters
- ----------
- data_source: :class:`AMR3DData` or :class:`Dataset`, optional
- This is the source to be rendered, which can be any arbitrary yt
- data object or dataset.
- fields : string
- The name of the field to be rendered.
-
- Examples
- --------
- >>> source = MeshSource(ds, ('all', 'convected'))
-
- """
+ r"""Initialize a new unstructured source for rendering."""
super(MeshSource, self).__init__()
self.data_source = data_source_or_all(data_source)
field = self.data_source._determine_fields(field)[0]
@@ -323,7 +347,24 @@
field_data.d)
def render(self, camera, zbuffer=None):
+ """Renders an image using the provided camera
+ Parameters
+ ----------
+ camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+ A volume rendering camera. Can be any type of camera.
+ zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+ A zbuffer array. This is used for opaque sources to determine the
+ z position of the source relative to other sources. Only useful if
+ you are manually calling render on multiple sources. Scene.render
+ uses this internally.
+
+ Returns
+ -------
+ A :class:`yt.data_objects.image_array.ImageArray` instance containing
+ the rendered image.
+
+ """
self.sampler = new_mesh_sampler(camera, self)
mylog.debug("Casting rays")
@@ -340,33 +381,33 @@
class PointSource(OpaqueSource):
+ r"""A rendering source of opaque points in the scene.
+
+ This class provides a mechanism for adding points to a scene; these
+ points will be opaque, and can also be colored.
+
+ Parameters
+ ----------
+ positions: array, shape (N, 3)
+ These positions, in data-space coordinates, are the points to be
+ added to the scene.
+ colors : array, shape (N, 4), optional
+ The colors of the points, including an alpha channel, in floating
+ point running from 0..1.
+ color_stride : int, optional
+ The stride with which to access the colors when putting them on the
+ scene.
+
+ Examples
+ --------
+ >>> source = PointSource(particle_positions)
+
+ """
_image = None
data_source = None
def __init__(self, positions, colors=None, color_stride=1):
- r"""A rendering source of opaque points in the scene.
-
- This class provides a mechanism for adding points to a scene; these
- points will be opaque, and can also be colored.
-
- Parameters
- ----------
- positions: array, shape (N, 3)
- These positions, in data-space coordinates, are the points to be
- added to the scene.
- colors : array, shape (N, 4), optional
- The colors of the points, including an alpha channel, in floating
- point running from 0..1.
- color_stride : int, optional
- The stride with which to access the colors when putting them on the
- scene.
-
- Examples
- --------
- >>> source = PointSource(particle_positions)
-
- """
self.positions = positions
# If colors aren't individually set, make black with full opacity
if colors is None:
@@ -376,6 +417,24 @@
self.color_stride = color_stride
def render(self, camera, zbuffer=None):
+ """Renders an image using the provided camera
+
+ Parameters
+ ----------
+ camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+ A volume rendering camera. Can be any type of camera.
+ zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+ A zbuffer array. This is used for opaque sources to determine the
+ z position of the source relative to other sources. Only useful if
+ you are manually calling render on multiple sources. Scene.render
+ uses this internally.
+
+ Returns
+ -------
+ A :class:`yt.data_objects.image_array.ImageArray` instance containing
+ the rendered image.
+
+ """
vertices = self.positions
if zbuffer is None:
empty = camera.lens.new_image(camera)
@@ -401,39 +460,38 @@
class LineSource(OpaqueSource):
+ r"""A render source for a sequence of opaque line segments.
+
+ This class provides a mechanism for adding lines to a scene; these
+ points will be opaque, and can also be colored.
+
+ Parameters
+ ----------
+ positions: array, shape (N, 2, 3)
+ These positions, in data-space coordinates, are the starting and
+ stopping points for each pair of lines. For example,
+ positions[0][0] and positions[0][1] would give the (x, y, z)
+ coordinates of the beginning and end points of the first line,
+ respectively.
+ colors : array, shape (N, 4), optional
+ The colors of the points, including an alpha channel, in floating
+ point running from 0..1. Note that they correspond to the line
+ segment succeeding each point; this means that strictly speaking
+ they need only be (N-1) in length.
+ color_stride : int, optional
+ The stride with which to access the colors when putting them on the
+ scene.
+
+ Examples
+ --------
+ >>> source = LineSource(np.random.random((10, 3)))
+
+ """
_image = None
data_source = None
def __init__(self, positions, colors=None, color_stride=1):
- r"""A render source for a sequence of opaque line segments.
-
- This class provides a mechanism for adding lines to a scene; these
- points will be opaque, and can also be colored.
-
- Parameters
- ----------
- positions: array, shape (N, 2, 3)
- These positions, in data-space coordinates, are the starting and
- stopping points for each pair of lines. For example,
- positions[0][0] and positions[0][1] would give the (x, y, z)
- coordinates of the beginning and end points of the first line,
- respectively.
- colors : array, shape (N, 4), optional
- The colors of the points, including an alpha channel, in floating
- point running from 0..1. Note that they correspond to the line
- segment succeeding each point; this means that strictly speaking
- they need only be (N-1) in length.
- color_stride : int, optional
- The stride with which to access the colors when putting them on the
- scene.
-
- Examples
- --------
- >>> source = LineSource(np.random.random((10, 3)))
-
- """
-
super(LineSource, self).__init__()
assert(positions.shape[1] == 2)
@@ -451,6 +509,24 @@
self.color_stride = color_stride
def render(self, camera, zbuffer=None):
+ """Renders an image using the provided camera
+
+ Parameters
+ ----------
+ camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+ A volume rendering camera. Can be any type of camera.
+ zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+ A zbuffer array. This is used for opaque sources to determine the
+ z position of the source relative to other sources. Only useful if
+ you are manually calling render on multiple sources. Scene.render
+ uses this internally.
+
+ Returns
+ -------
+ A :class:`yt.data_objects.image_array.ImageArray` instance containing
+ the rendered image.
+
+ """
vertices = self.positions
if zbuffer is None:
empty = camera.lens.new_image(camera)
@@ -476,26 +552,26 @@
class BoxSource(LineSource):
+ r"""A render source for a box drawn with line segments.
+
+ This render source will draw a box, with transparent faces, in data
+ space coordinates. This is useful for annotations.
+
+ Parameters
+ ----------
+ left_edge: array-like, shape (3,), float
+ The left edge coordinates of the box.
+ right_edge : array-like, shape (3,), float
+ The right edge coordinates of the box.
+ color : array-like, shape (4,), float, optional
+ The colors (including alpha) to use for the lines.
+
+ Examples
+ --------
+ >>> source = BoxSource(grid_obj.LeftEdge, grid_obj.RightEdge)
+
+ """
def __init__(self, left_edge, right_edge, color=None):
- r"""A render source for a box drawn with line segments.
-
- This render source will draw a box, with transparent faces, in data
- space coordinates. This is useful for annotations.
-
- Parameters
- ----------
- left_edge: array-like, shape (3,), float
- The left edge coordinates of the box.
- right_edge : array-like, shape (3,), float
- The right edge coordinates of the box.
- color : array-like, shape (4,), float, optional
- The colors (including alpha) to use for the lines.
-
- Examples
- --------
- >>> source = BoxSource(grid_obj.LeftEdge, grid_obj.RightEdge)
-
- """
if color is None:
color = np.array([1.0, 1.0, 1.0, 1.0])
color = ensure_numpy_array(color)
@@ -513,32 +589,32 @@
class GridSource(LineSource):
+ r"""A render source for drawing grids in a scene.
+
+ This render source will draw blocks that are within a given data
+ source, by default coloring them by their level of resolution.
+
+ Parameters
+ ----------
+ data_source: :class:`~yt.data_objects.api.DataContainer`
+ The data container that will be used to identify grids to draw.
+ alpha : float
+ The opacity of the grids to draw.
+ cmap : color map name
+ The color map to use to map resolution levels to color.
+ min_level : int, optional
+ Minimum level to draw
+ max_level : int, optional
+ Maximum level to draw
+
+ Examples
+ --------
+ >>> dd = ds.sphere("c", (0.1, "unitary"))
+ >>> source = GridSource(dd, alpha=1.0)
+
+ """
def __init__(self, data_source, alpha=0.3, cmap='algae',
min_level=None, max_level=None):
- r"""A render source for drawing grids in a scene.
-
- This render source will draw blocks that are within a given data
- source, by default coloring them by their level of resolution.
-
- Parameters
- ----------
- data_source: :class:`~yt.data_objects.api.DataContainer`
- The data container that will be used to identify grids to draw.
- alpha : float
- The opacity of the grids to draw.
- cmap : color map name
- The color map to use to map resolution levels to color.
- min_level : int, optional
- Minimum level to draw
- max_level : int, optional
- Maximum level to draw
-
- Examples
- --------
- >>> dd = ds.sphere("c", (0.1, "unitary"))
- >>> source = GridSource(dd, alpha=1.0)
-
- """
data_source = data_source_or_all(data_source)
corners = []
levels = []
@@ -586,24 +662,24 @@
class CoordinateVectorSource(OpaqueSource):
+ r"""Draw coordinate vectors on the scene.
+
+ This will draw a set of coordinate vectors on the camera image. They
+ will appear in the lower right of the image.
+
+ Parameters
+ ----------
+ colors: array-like, shape (3,4), optional
+ The x, y, z RGBA values to use to draw the vectors.
+ alpha : float, optional
+ The opacity of the vectors.
+
+ Examples
+ --------
+ >>> source = CoordinateVectorSource()
+
+ """
def __init__(self, colors=None, alpha=1.0):
- r"""Draw coordinate vectors on the scene.
-
- This will draw a set of coordinate vectors on the camera image. They
- will appear in the lower right of the image.
-
- Parameters
- ----------
- colors: array-like, shape (3,4), optional
- The x, y, z RGBA values to use to draw the vectors.
- alpha : float, optional
- The opacity of the vectors.
-
- Examples
- --------
- >>> source = CoordinateVectorSource()
-
- """
super(CoordinateVectorSource, self).__init__()
# If colors aren't individually set, make black with full opacity
if colors is None:
@@ -616,6 +692,24 @@
self.color_stride = 2
def render(self, camera, zbuffer=None):
+ """Renders an image using the provided camera
+
+ Parameters
+ ----------
+ camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+ A volume rendering camera. Can be any type of camera.
+ zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+ A zbuffer array. This is used for opaque sources to determine the
+ z position of the source relative to other sources. Only useful if
+ you are manually calling render on multiple sources. Scene.render
+ uses this internally.
+
+ Returns
+ -------
+ A :class:`yt.data_objects.image_array.ImageArray` instance containing
+ the rendered image.
+
+ """
camera.lens.setup_box_properties(camera)
center = camera.focus
# Get positions at the focus
diff -r 00fd411044ba420d06186ebaba2e040e77836f93 -r 1a484c166e77a29f51c9b0c9ab4890a3398de543 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -24,39 +24,38 @@
class Scene(object):
- """The Scene Class
+ """A virtual landscape for a volume rendering.
The Scene class is meant to be the primary container for the
new volume rendering framework. A single scene may contain
several Camera and RenderSource instances, and is the primary
driver behind creating a volume rendering.
+ This sets up the basics needed to add sources and cameras.
+ This does very little setup, and requires additional input
+ to do anything useful.
+
+ Parameters
+ ----------
+ None
+
+ Examples
+ --------
+ >>> sc = Scene()
+
"""
_current = None
_camera = None
def __init__(self):
- r"""Create a new Scene instance.
-
- This sets up the basics needed to add sources and cameras.
- This does very little setup, and requires additional input
- to do anything useful.
-
- Parameters
- ----------
- None
-
- Examples
- --------
- >>> sc = Scene()
-
- """
+ r"""Create a new Scene instance"""
super(Scene, self).__init__()
self.sources = OrderedDict()
self.camera = None
def get_source(self, source_num):
+ """Returns the volume rendering source indexed by ``source_num``"""
return list(itervalues(self.sources))[source_num]
def _iter_opaque_sources(self):
@@ -79,9 +78,18 @@
yield k, source
def add_source(self, render_source, keyname=None):
- """
- Add a render source to the scene. This will autodetect the
- type of source.
+ """Add a render source to the scene.
+
+ This will autodetect the type of source.
+
+ Parameters
+ ----------
+ render_source: an instance of :class:`yt.visualization.volume_rendering.render_source.RenderSource`
+ A source to contribute to the volume rendering scene.
+
+ keyname: string (optional)
+ The dictionary key used to reference the source in the sources
+ dictionary.
"""
if keyname is None:
keyname = 'source_%02i' % len(self.sources)
@@ -105,13 +113,13 @@
Image will be clipped before saving to the standard deviation
of the image multiplied by this value. Useful for enhancing
images. Default: None
- camera: :class:`Camera`, optional
+ camera: :class:`yt.visualization.volume_rendering.camera.Camera`, optional
If specified, use a different :class:`Camera` to render the scene.
Returns
-------
- bmp: :class:`ImageArray`
- ImageArray instance of the current rendering image.
+ A :class:`yt.data_objects.image_array.ImageArray` instance containing
+ the current rendering image.
Examples
--------
diff -r 00fd411044ba420d06186ebaba2e040e77836f93 -r 1a484c166e77a29f51c9b0c9ab4890a3398de543 yt/visualization/volume_rendering/zbuffer_array.py
--- a/yt/visualization/volume_rendering/zbuffer_array.py
+++ b/yt/visualization/volume_rendering/zbuffer_array.py
@@ -12,13 +12,40 @@
#-----------------------------------------------------------------------------
-from yt.funcs import mylog
-from yt.data_objects.api import ImageArray
import numpy as np
class ZBuffer(object):
- """docstring for ZBuffer"""
+ """A container object for z-buffer arrays
+
+ A zbuffer is a companion array for an image that allows the volume rendering
+ infrastructure to determine whether one opaque source is in front of another
+ opaque source. The z buffer encodes the distance to the opaque source
+ relative to the camera position.
+
+ Parameters
+ ----------
+ rgba: MxNx4 image
+ The image the z buffer corresponds to
+ z: MxN image
+ The z depth of each pixel in the image. The shape of the image must be
+ the same as each RGBA channel in the original image.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> shape = (64, 64)
+ >>> b1 = ZBuffer(np.random.random(shape), np.ones(shape))
+ >>> b2 = ZBuffer(np.random.random(shape), np.zeros(shape))
+ >>> c = b1 + b2
+ >>> np.all(c.rgba == b2.rgba)
+ True
+ >>> np.all(c.z == b2.z)
+ True
+ >>> np.all(c == b2)
+ True
+
+ """
def __init__(self, rgba, z):
super(ZBuffer, self).__init__()
assert(rgba.shape[:len(z.shape)] == z.shape)
@@ -31,8 +58,8 @@
f = self.z < other.z
if self.z.shape[1] == 1:
# Non-rectangular
- rgba = (self.rgba * f[:,None,:])
- rgba += (other.rgba * (1.0 - f)[:,None,:])
+ rgba = (self.rgba * f[:, None, :])
+ rgba += (other.rgba * (1.0 - f)[:, None, :])
else:
b = self.z > other.z
rgba = np.empty(self.rgba.shape)
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list