[yt-svn] commit/yt: ngoldbaum: Merged in atmyers/yt (pull request #1936)

commits-noreply at bitbucket.org
Wed Jan 27 09:28:13 PST 2016


1 new commit in yt:

https://bitbucket.org/yt_analysis/yt/commits/5eb209b48f9a/
Changeset:   5eb209b48f9a
Branch:      yt
User:        ngoldbaum
Date:        2016-01-27 17:28:08+00:00
Summary:     Merged in atmyers/yt (pull request #1936)

Making the unstructured mesh rendering API consistent with the other Volume Renderings
Affected #:  5 files
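
In short, a MeshSource is no longer rendered directly with per-call keyword
arguments; it is attached to a Scene (normally via yt.create_scene) and
configured through properties. A minimal sketch of the new usage, pieced
together from the documentation changes below (the sample file and field
names are the ones used there):

    import yt

    # load an Exodus II (unstructured mesh) dataset
    ds = yt.load("MOOSE_sample_data/out.e-s010")

    # create_scene now attaches a MeshSource for mesh datasets
    sc = yt.create_scene(ds, ('connect1', 'diffused'))

    # options that used to be keyword arguments to MeshSource.render()
    # are now properties on the source
    ms = sc.get_source(0)
    ms.cmap = 'Eos A'
    ms.color_bounds = (0.0, 2.0)

    # render, optionally draw the element boundaries, and save
    sc.render()
    sc.annotate_mesh_lines()
    sc.save('hex_mesh_render.png')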

diff -r ccd5c2bf28d081a7ce4b2e2faa4c08fe9d0c37b0 -r 5eb209b48f9a45ef1ff4ec25de90e019401e0de0 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -77,49 +77,59 @@
 
 .. python-script::
 
-   import yt
-   from yt.visualization.volume_rendering.api import MeshSource, Camera
-   import yt.utilities.png_writer as pw
+    import yt
 
-   ds = yt.load("MOOSE_sample_data/out.e-s010")
+    ds = yt.load("MOOSE_sample_data/out.e-s010")
 
-   ms = MeshSource(ds, ('connect1', 'diffused'))
+    # create a default scene
+    sc = yt.create_scene(ds)
 
-   # setup the camera
-   cam = Camera(ds)
-   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
+    # override the default colormap
+    ms = sc.get_source(0)
+    ms.cmap = 'Eos A'
 
-   cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')  # the camera location
-   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
-   cam.set_position(cam_pos, north_vector)
+    # adjust the camera position and orientation
+    cam = sc.camera
+    cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')
+    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    cam.set_position(cam_pos, north_vector)
 
-   im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
-   pw.write_png(im, 'hex_mesh_render.png')
+    # increase the default resolution
+    cam.resolution = (800, 800)
+
+    # render and save
+    sc.save()
 
 You can also overplot the mesh boundaries:
 
 .. python-script::
 
-   import yt
-   from yt.visualization.volume_rendering.api import MeshSource, Camera
-   import yt.utilities.png_writer as pw
+    import yt
 
-   ds = yt.load("MOOSE_sample_data/out.e-s010")
+    ds = yt.load("MOOSE_sample_data/out.e-s010")
 
-   ms = MeshSource(ds, ('connect1', 'diffused'))
+    # create a default scene
+    sc = yt.create_scene(ds)
 
-   # setup the camera
-   cam = Camera(ds)
-   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
+    # override the default colormap
+    ms = sc.get_source(0)
+    ms.cmap = 'Eos A'
 
-   cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')  # the camera location
-   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
-   cam.set_position(cam_pos, north_vector)
-   cam.resolution = (800, 800)
+    # adjust the camera position and orientation
+    cam = sc.camera
+    cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')
+    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    cam.set_position(cam_pos, north_vector)
 
-   ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
-   im = ms.annotate_mesh_lines()
-   pw.write_png(im, 'hex_render_with_mesh.png')
+    # increase the default resolution
+    cam.resolution = (800, 800)
+
+    # render, draw the element boundaries, and save
+    sc.render()
+    sc.annotate_mesh_lines()
+    sc.save()
 
 As with slices, you can visualize different meshes and different fields. For example,
 here is a script similar to the above that plots the "diffused" variable
@@ -127,24 +137,29 @@
 
 .. python-script::
 
-   import yt
-   from yt.visualization.volume_rendering.api import MeshSource, Camera
-   import yt.utilities.png_writer as pw
+    import yt
 
-   ds = yt.load("MOOSE_sample_data/out.e-s010")
-
-   ms = MeshSource(ds, ('connect2', 'diffused'))
-
-   # setup the camera
-   cam = Camera(ds)
-   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
-
-   cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')  # the camera location
-   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
-   cam.set_position(cam_pos, north_vector)
-
-   im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
-   pw.write_png(im, 'hex_mesh_render.png')
+    ds = yt.load("MOOSE_sample_data/out.e-s010")
+   
+    # create a default scene
+    sc = yt.create_scene(ds, ('connect2', 'diffused'))
+   
+    # override the default colormap
+    ms = sc.get_source(0)
+    ms.cmap = 'Eos A'
+   
+    # adjust the camera position and orientation
+    cam = sc.camera
+    cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')
+    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    cam.set_position(cam_pos, north_vector)
+   
+    # increase the default resolution
+    cam.resolution = (800, 800)
+   
+    # render and save
+    sc.save()
 
 Next, here is an example of rendering a dataset with tetrahedral mesh elements.
 Note that in this dataset, there are multiple "steps" per file, so we specify
@@ -152,49 +167,64 @@
 
 .. python-script::
 
-   import yt
-   from yt.visualization.volume_rendering.api import MeshSource, Camera
-   import yt.utilities.png_writer as pw
+    import yt
 
-   filename = "MOOSE_sample_data/high_order_elems_tet4_refine_out.e"
-   ds = yt.load(filename, step=-1)  # we look at the last time frame
+    filename = "MOOSE_sample_data/high_order_elems_tet4_refine_out.e"
+    ds = yt.load(filename, step=-1)  # we look at the last time frame
 
-   ms = MeshSource(ds, ('connect1', 'u'))
+    # create a default scene
+    sc = yt.create_scene(ds, ("connect1", "u"))
 
-   # setup the camera 
-   cam = Camera(ds)
-   camera_position = ds.arr([3.0, 3.0, 3.0], 'code_length')
-   cam.set_width(ds.arr([2.0, 2.0, 2.0], 'code_length'))
-   north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
-   cam.set_position(camera_position, north_vector)
+    # override the default colormap
+    ms = sc.get_source(0)
+    ms.cmap = 'Eos A'
 
-   im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 1.0))
-   pw.write_png(im, 'tetra_render.png')
+    # adjust the camera position and orientation
+    cam = sc.camera
+    camera_position = ds.arr([3.0, 3.0, 3.0], 'code_length')
+    cam.set_width(ds.arr([2.0, 2.0, 2.0], 'code_length'))
+    north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
+    cam.set_position(camera_position, north_vector)
+
+    # increase the default resolution
+    cam.resolution = (800, 800)
+
+    # render and save
+    sc.save()
 
 Another example, this time plotting the temperature field from a 20-node hex 
 MOOSE dataset:
 
 .. python-script::
 
-   import yt
-   from yt.visualization.volume_rendering.api import MeshSource, Camera
-   import yt.utilities.png_writer as pw
+    import yt
 
-   ds = yt.load("MOOSE_sample_data/mps_out.e", step=-1)  # we load the last time frame
+    # We load the last time frame
+    ds = yt.load("MOOSE_sample_data/mps_out.e", step=-1)
 
-   ms = MeshSource(ds, ('connect2', 'temp'))
+    # create a default scene
+    sc = yt.create_scene(ds, ("connect2", "temp"))
 
-   # set up the camera
-   cam = Camera(ds)
-   camera_position = ds.arr([-1.0, 1.0, -0.5], 'code_length')
-   north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
-   cam.width = ds.arr([0.04, 0.04, 0.04], 'code_length')
-   cam.resolution = (800, 800)
-   cam.set_position(camera_position, north_vector)
+    # override the default colormap. This time we also override
+    # the default color bounds
+    ms = sc.get_source(0)
+    ms.cmap = 'hot'
+    ms.color_bounds = (500.0, 1700.0)
 
-   im = ms.render(cam, cmap='hot', color_bounds=(500.0, 1700.0))
-   im = ms.annotate_mesh_lines()
-   pw.write_png(im, 'hex20_render.png')
+    # adjust the camera position and orientation
+    cam = sc.camera
+    camera_position = ds.arr([-1.0, 1.0, -0.5], 'code_length')
+    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    cam.width = ds.arr([0.04, 0.04, 0.04], 'code_length')
+    cam.set_position(camera_position, north_vector)
+
+    # increase the default resolution
+    cam.resolution = (800, 800)
+
+    # render, draw the element boundaries, and save
+    sc.render()
+    sc.annotate_mesh_lines()
+    sc.save()
 
 As with other volume renderings in yt, you can swap out different lenses. Here is 
 an example that uses a "perspective" lens, for which the rays diverge from the 
@@ -202,25 +232,35 @@
 
 .. python-script::
 
-   import yt
-   from yt.visualization.volume_rendering.api import MeshSource, Camera
-   import yt.utilities.png_writer as pw
+    import yt
+    from yt.visualization.volume_rendering.api import Camera
 
-   ds = yt.load("MOOSE_sample_data/out.e-s010")
+    ds = yt.load("MOOSE_sample_data/out.e-s010")
 
-   ms = MeshSource(ds, ('connect2', 'diffused'))
+    # create a default scene
+    sc = yt.create_scene(ds, ("connect2", "diffused"))
 
-   # setup the camera
-   cam = Camera(ds, lens_type='perspective')
-   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
-
-   cam_pos = ds.arr([-4.5, 4.5, -4.5], 'code_length')  # the camera location
-   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
-   cam.set_position(cam_pos, north_vector)
-
-   im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
-   im = ms.annotate_mesh_lines()
-   pw.write_png(im, 'hex_mesh_render_perspective.png')
+    # override the default colormap
+    ms = sc.get_source(0)
+    ms.cmap = 'Eos A'
+   
+    # Create a perspective Camera
+    cam = Camera(ds, lens_type='perspective')
+    cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    cam_pos = ds.arr([-4.5, 4.5, -4.5], 'code_length')
+    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    cam.set_position(cam_pos, north_vector)
+   
+    # tell our scene to use it
+    sc.camera = cam
+   
+    # increase the default resolution
+    cam.resolution = (800, 800)
+   
+    # render, draw the element boundaries, and save
+    sc.render()
+    sc.annotate_mesh_lines()
+    sc.save()
 
 You can also create scenes that have multiple meshes. The ray-tracing infrastructure
 will keep track of the depth information for each source separately, and composite
@@ -231,20 +271,21 @@
 
     import yt
     from yt.visualization.volume_rendering.api import MeshSource, Camera, Scene
-    import yt.utilities.png_writer as pw
 
     ds = yt.load("MOOSE_sample_data/out.e-s010")
 
     # this time we create an empty scene and add sources to it one-by-one
     sc = Scene()
 
+    # set up our Camera
     cam = Camera(ds)
     cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
     cam.set_position(ds.arr([-3.0, 3.0, -3.0], 'code_length'),
-                     ds.arr([0.0, 1.0, 0.0], 'dimensionless'))
+                     ds.arr([0.0, 1.0, 0.0], 'dimensionless'))
     cam.set_width = ds.arr([8.0, 8.0, 8.0], 'code_length')
     cam.resolution = (800, 800)
 
+    # tell the scene to use it
     sc.camera = cam
 
     # create two distinct MeshSources from 'connect1' and 'connect2'
@@ -254,10 +295,9 @@
     sc.add_source(ms1)
     sc.add_source(ms2)
 
+    # render and save
     im = sc.render()
-
-    pw.write_png(im, 'composite_render.png')
-
+    sc.save()
 
 Making Movies
 ^^^^^^^^^^^^^
@@ -270,30 +310,37 @@
 
 .. code-block:: python
 
-   import yt
-   from yt.visualization.volume_rendering.api import MeshSource, Camera
-   import yt.utilities.png_writer as pw
+    import yt
+    import numpy as np
 
-   ds = yt.load("MOOSE_sample_data/out.e-s010")
+    ds = yt.load("MOOSE_sample_data/out.e-s010")
 
-   ms = MeshSource(ds, ('connect1', 'diffused'))
+    # create a default scene
+    sc = yt.create_scene(ds)
 
-   # setup the camera
-   cam = Camera(ds)
-   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
+    # override the default colormap
+    ms = sc.get_source(0)
+    ms.cmap = 'Eos A'
 
-   cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')  # the camera location
-   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
-   cam.set_position(cam_pos, north_vector)
-   cam.resolution = (800, 800)
-   cam.steady_north = True
+    # adjust the camera position and orientation
+    cam = sc.camera
+    cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')
+    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    cam.set_position(cam_pos, north_vector)
 
-   # make movie frames
-   num_frames = 301
-   for i in range(num_frames):
-       cam.rotate(2.0*np.pi/num_frames)
-       im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
-       pw.write_png(im, 'movie_frames/surface_render_%.4d.png' % i)
+    # increase the default resolution
+    cam.resolution = (800, 800)
+
+    # set the camera to use "steady_north"
+    cam.steady_north = True
+
+    # make movie frames
+    num_frames = 301
+    for i in range(num_frames):
+        cam.rotate(2.0*np.pi/num_frames)
+        sc.render()
+        sc.save('movie_frames/surface_render_%.4d.png' % i)
 
 Finally, this example demonstrates how to loop over the time steps in a single
 file with a fixed camera position:
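
The time-step loop itself falls outside this hunk; as a rough, hypothetical
sketch only (the step count and output paths below are invented, not taken
from the docs), a fixed-camera loop over steps with the new API could look
like:

    import yt

    filename = "MOOSE_sample_data/mps_out.e"
    num_steps = 10  # hypothetical; use the number of steps in your file

    for step in range(num_steps):
        # load one time step at a time, keeping the camera fixed
        ds = yt.load(filename, step=step)
        sc = yt.create_scene(ds, ("connect2", "temp"))

        cam = sc.camera
        cam.set_position(ds.arr([-1.0, 1.0, -0.5], 'code_length'),
                         ds.arr([0.0, 1.0, 1.0], 'dimensionless'))
        cam.width = ds.arr([0.04, 0.04, 0.04], 'code_length')

        sc.save('time_frames/frame_%.4d.png' % step)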

diff -r ccd5c2bf28d081a7ce4b2e2faa4c08fe9d0c37b0 -r 5eb209b48f9a45ef1ff4ec25de90e019401e0de0 yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -74,6 +74,7 @@
                                               units_override=units_override)
         self.index_filename = filename
         self.storage_filename = storage_filename
+        self.default_field = ("connect1", "diffused")
 
     def _set_code_unit_attributes(self):
         # This is where quantities are created that represent the various

diff -r ccd5c2bf28d081a7ce4b2e2faa4c08fe9d0c37b0 -r 5eb209b48f9a45ef1ff4ec25de90e019401e0de0 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -342,13 +342,56 @@
         self.mesh = None
         self.current_image = None
 
+        # default color map
+        self._cmap = 'algae'
+        self._color_bounds = None
+
+        # default mesh annotation options
+        self._annotate_mesh = False
+        self._mesh_line_color = None
+        self._mesh_line_alpha = 1.0
+
         # Error checking
         assert(self.field is not None)
         assert(self.data_source is not None)
 
         self.scene = mesh_traversal.YTEmbreeScene()
+        self.build_mesh()
 
-        self.build_mesh()
+    def cmap():
+        '''
+        This is the name of the colormap that will be used when rendering
+        this MeshSource object. Should be a string, like 'algae' or 'hot'.
+
+        '''
+
+        def fget(self):
+            return self._cmap
+
+        def fset(self, cmap_name):
+            self._cmap = cmap_name
+            if hasattr(self, "data"):
+                self.current_image = self.apply_colormap()
+        return locals()
+    cmap = property(**cmap())
+
+    def color_bounds():
+        '''
+        These are the bounds that will be used with the colormap to display
+        the rendered image. Should be a (vmin, vmax) tuple, like (0.0, 2.0). If
+        None, the bounds will be automatically inferred from the max and min of
+        the rendered data.
+
+        '''
+        def fget(self):
+            return self._color_bounds
+
+        def fset(self, bounds):
+            self._color_bounds = bounds
+            if hasattr(self, "data"):
+                self.current_image = self.apply_colormap()
+        return locals()
+    color_bounds = property(**color_bounds())
 
     def _validate(self):
         """Make sure that all dependencies have been met"""
@@ -406,7 +449,7 @@
                                                             indices,
                                                             field_data)
 
-    def render(self, camera, zbuffer=None, cmap='algae', color_bounds=None):
+    def render(self, camera, zbuffer=None):
         """Renders an image using the provided camera
 
         Parameters
@@ -436,6 +479,7 @@
         elif zbuffer.rgba.shape != shape:
             zbuffer = ZBuffer(zbuffer.rgba.reshape(shape),
                               zbuffer.z.reshape(shape[:2]))
+        self.zbuffer = zbuffer
 
         self.sampler = new_mesh_sampler(camera, self)
 
@@ -444,17 +488,19 @@
         mylog.debug("Done casting rays")
 
         self.finalize_image(camera, self.sampler.aimage)
-
         self.data = self.sampler.aimage
-        self.current_image = self.apply_colormap(cmap=cmap,
-                                                 color_bounds=color_bounds)
+        self.current_image = self.apply_colormap()
 
         zbuffer += ZBuffer(self.current_image.astype('float64'),
                            self.sampler.zbuffer)
-        zbuffer.rgba = ImageArray(zbuffer.rgba.astype('uint8'))
+        zbuffer.rgba = ImageArray(zbuffer.rgba)
         self.zbuffer = zbuffer
-        self.zbuffer.rgba = self.zbuffer.rgba.astype('uint8')
         self.current_image = self.zbuffer.rgba
+
+        if self._annotate_mesh:
+            self.current_image = self.annotate_mesh_lines(self._mesh_line_color,
+                                                          self._mesh_line_alpha)
+
         return self.current_image
 
     def finalize_image(self, camera, image):
@@ -466,7 +512,7 @@
         sam.mesh_lines = sam.mesh_lines.reshape(Nx, Ny)
         sam.zbuffer = sam.zbuffer.reshape(Nx, Ny)
 
-    def annotate_mesh_lines(self, color=None, alpha=255):
+    def annotate_mesh_lines(self, color=None, alpha=1.0):
         r"""
 
         Modifies this MeshSource by drawing the mesh lines.
@@ -475,7 +521,7 @@
 
         Parameters
         ----------
-        colors: array of ints, shape (4), optional
+        color: array of ints, shape (4), optional
             The RGBA value to use to draw the mesh lines.
             Default is black.
         alpha : float, optional
@@ -483,6 +529,10 @@
 
         """
 
+        self._annotate_mesh = True
+        self._mesh_line_color = color
+        self._mesh_line_alpha = alpha
+
         if color is None:
             color = np.array([0, 0, 0, alpha])
 
@@ -495,10 +545,7 @@
 
         return self.current_image
 
-    def apply_colormap(self, cmap='algae', color_bounds=None):
-        self.current_image = apply_colormap(self.data,
-                                            color_bounds=color_bounds,
-                                            cmap_name=cmap)
+    def apply_colormap(self):
         '''
 
         Applies a colormap to the current image without re-rendering.
@@ -518,10 +565,14 @@
 
 
         '''
-        alpha = self.current_image[:, :, 3]
+
+        image = apply_colormap(self.data,
+                               color_bounds=self._color_bounds,
+                               cmap_name=self._cmap)/255.
+        alpha = image[:, :, 3]
         alpha[self.sampler.image_used == -1] = 0.0
-        self.current_image[:, :, 3] = alpha        
-        return self.current_image
+        image[:, :, 3] = alpha        
+        return image
 
     def __repr__(self):
         disp = "<Mesh Source>:%s " % str(self.data_source)

diff -r ccd5c2bf28d081a7ce4b2e2faa4c08fe9d0c37b0 -r 5eb209b48f9a45ef1ff4ec25de90e019401e0de0 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -18,7 +18,7 @@
 from yt.extern.six import iteritems, itervalues, string_types
 from .camera import Camera
 from .render_source import OpaqueSource, BoxSource, CoordinateVectorSource, \
-    GridSource, RenderSource
+    GridSource, RenderSource, MeshSource
 from .zbuffer_array import ZBuffer
 from yt.extern.six.moves import builtins
 from yt.utilities.exceptions import YTNotInsideNotebook
@@ -400,6 +400,26 @@
         self.add_source(grids)
         return self
 
+    def annotate_mesh_lines(self, color=None, alpha=1.0):
+        """
+
+        Modifies this Scene by drawing the mesh line boundaries
+        on all MeshSources.
+
+        Parameters
+        ----------
+        color: array of ints, shape (4), optional
+            The RGBA value to use to draw the mesh lines.
+            Default is black.
+        alpha : float, optional
+            The opacity of the mesh lines. Default is 1.0 (opaque).
+
+        """
+        for k, source in self._iter_opaque_sources():
+            if isinstance(source, MeshSource):
+                source.annotate_mesh_lines(color=color, alpha=alpha)
+        return self
+
     def annotate_axes(self, colors=None, alpha=1.0):
         r"""
 

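The documentation examples above call annotate_mesh_lines() with its defaults;
the new Scene method also forwards a line color and opacity to every MeshSource
it holds. A small sketch with an arbitrary alpha value:

    import yt

    ds = yt.load("MOOSE_sample_data/out.e-s010")
    sc = yt.create_scene(ds)

    sc.render()
    sc.annotate_mesh_lines(alpha=0.5)  # fainter element boundaries
    sc.save('hex_render_faint_lines.png')
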
diff -r ccd5c2bf28d081a7ce4b2e2faa4c08fe9d0c37b0 -r 5eb209b48f9a45ef1ff4ec25de90e019401e0de0 yt/visualization/volume_rendering/volume_rendering.py
--- a/yt/visualization/volume_rendering/volume_rendering.py
+++ b/yt/visualization/volume_rendering/volume_rendering.py
@@ -14,7 +14,8 @@
 
 from .scene import Scene
 from .camera import Camera
-from .render_source import VolumeSource
+from .render_source import VolumeSource, \
+    MeshSource
 from .utils import data_source_or_all
 from yt.funcs import mylog
 from yt.utilities.exceptions import YTSceneFieldNotFound
@@ -60,6 +61,7 @@
     >>> ds = yt.load("Enzo_64/DD0046/DD0046")
     >>> sc = yt.create_scene(ds)
     """
+
     data_source = data_source_or_all(data_source)
     sc = Scene()
     if field is None:
@@ -69,8 +71,12 @@
                   Please specify a field in create_scene()""" % (field, data_source.ds))
         mylog.info('Setting default field to %s' % field.__repr__())
 
-    vol = VolumeSource(data_source, field=field)
-    sc.add_source(vol)
+    if hasattr(data_source.ds.index, "meshes"):
+        source = MeshSource(data_source, field=field)
+    else:
+        source = VolumeSource(data_source, field=field)
+
+    sc.add_source(source)
     sc.camera = Camera(data_source=data_source, lens_type=lens_type)
     return sc
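
With the volume_rendering.py change, create_scene picks the source type from
the dataset: an index with a "meshes" attribute (e.g. Exodus II data) gets a
MeshSource, anything else a VolumeSource. A quick check of that behavior
might look like:

    import yt
    from yt.visualization.volume_rendering.api import MeshSource

    ds = yt.load("MOOSE_sample_data/out.e-s010")
    sc = yt.create_scene(ds)

    # for an unstructured mesh dataset the scene's first source is a MeshSource
    assert isinstance(sc.get_source(0), MeshSource)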

Repository URL: https://bitbucket.org/yt_analysis/yt/
