[yt-svn] commit/yt: 3 new changesets

commits-noreply at bitbucket.org
Wed May 24 09:42:32 PDT 2017


3 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/e445d00ea384/
Changeset:   e445d00ea384
User:        ngoldbaum
Date:        2017-05-19 19:45:31+00:00
Summary:     only rotate volume rendering images after compositing all sources. closes #1410
Affected #:  2 files

diff -r 2c99a58e7aed85ec4948345fefb6cb39e392aaf9 -r e445d00ea38494697bfeda1e0934055babcb8097 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -444,16 +444,17 @@
             self.sampler(brick, num_threads=self.num_threads)
             total_cells += np.prod(brick.my_data[0].shape)
         mylog.debug("Done casting rays")
+        self.current_image = self.finalize_image(
+            camera, self.sampler.aimage)
 
-        self.current_image = self.finalize_image(camera,
-                                                 self.sampler.aimage,
-                                                 call_from_VR=True)
         if zbuffer is None:
-            self.zbuffer = ZBuffer(self.current_image,
-                                   np.full(self.current_image.shape[:2], np.inf))
+            self.zbuffer = ZBuffer(
+                self.current_image,
+                np.full(self.current_image.shape[:2], np.inf))
+
         return self.current_image
 
-    def finalize_image(self, camera, image, call_from_VR=False):
+    def finalize_image(self, camera, image):
         """Parallel reduce the image.
 
         Parameters
@@ -462,17 +463,12 @@
             The camera used to produce the volume rendering image.
         image: :class:`yt.data_objects.image_array.ImageArray` instance
             A reference to an image to fill
-        call_from_vr: boolean, optional
-            Whether or not this is being called from a higher level in the VR
-            interface. Used to set the correct orientation.
         """
         if self._volume is not None:
             image = self.volume.reduce_tree_images(image, camera.lens.viewpoint)
         image.shape = camera.resolution[0], camera.resolution[1], 4
         # If the call is from VR, the image is rotated by 180 to get correct
         # up direction
-        if call_from_VR is True:
-            image = np.rot90(image, k=2)
         if self.transfer_function.grey_opacity is False:
             image[:, :, 3] = 1
         return image
@@ -712,6 +708,7 @@
 
         return self.current_image
 
+
     def finalize_image(self, camera):
         sam = self.sampler
 
@@ -720,11 +717,6 @@
         Ny = camera.resolution[1]
         self.data = sam.aimage[:,:,0].reshape(Nx, Ny)
 
-        # rotate
-        self.data = np.rot90(self.data, k=2)
-        sam.aimage_used = np.rot90(sam.aimage_used, k=2)
-        sam.amesh_lines = np.rot90(sam.amesh_lines, k=2)
-        sam.azbuffer = np.rot90(sam.azbuffer, k=2)
 
     def annotate_mesh_lines(self, color=None, alpha=1.0):
         r"""

diff -r 2c99a58e7aed85ec4948345fefb6cb39e392aaf9 -r e445d00ea38494697bfeda1e0934055babcb8097 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -501,7 +501,9 @@
             im = source.render(camera, zbuffer=opaque)
             opaque.rgba = im
 
-        return im
+        # rotate image 180 degrees so orientation agrees with e.g.
+        # a PlotWindow plot
+        return np.rot90(im, k=2)
 
     def add_camera(self, data_source=None, lens_type='plane-parallel',
                    auto=False):

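A side note on why this refactor is safe: np.rot90(..., k=2) is a pure
per-pixel re-indexing, so it commutes with per-pixel compositing; rotating
once after compositing all sources yields the same pixels as rotating each
source image individually. A minimal numpy sketch (not yt API; the weighted
blend below stands in for the real alpha compositing):

    import numpy as np

    rng = np.random.default_rng(0)
    a = rng.random((4, 4, 4))   # RGBA image from one source
    b = rng.random((4, 4, 4))   # RGBA image from another source

    # stand-in for alpha compositing: any per-pixel combination works
    composite = 0.5 * a + 0.5 * b

    # rotating the composite once == compositing the rotated layers
    assert np.array_equal(np.rot90(composite, k=2),
                          0.5 * np.rot90(a, k=2) + 0.5 * np.rot90(b, k=2))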

https://bitbucket.org/yt_analysis/yt/commits/26ac41c870cb/
Changeset:   26ac41c870cb
User:        ngoldbaum
Date:        2017-05-19 21:06:09+00:00
Summary:     ensure externally exposed sampler attributes are ndarrays
Affected #:  1 file

diff -r e445d00ea38494697bfeda1e0934055babcb8097 -r 26ac41c870cb545d2b96096f088b1ccb21a5bc47 yt/utilities/lib/image_samplers.pyx
--- a/yt/utilities/lib/image_samplers.pyx
+++ b/yt/utilities/lib/image_samplers.pyx
@@ -119,9 +119,12 @@
         self.image.x_vec = <np.float64_t *> x_vec.data
         self.ay_vec = y_vec
         self.image.y_vec = <np.float64_t *> y_vec.data
-        self.image.zbuffer = self.azbuffer = zbuffer
-        self.image.image_used = self.aimage_used = image_used
-        self.image.mesh_lines = self.amesh_lines = mesh_lines
+        self.image.zbuffer = zbuffer
+        self.azbuffer = np.asarray(zbuffer)
+        self.image.image_used = image_used
+        self.aimage_used = np.asarray(image_used)
+        self.image.mesh_lines = mesh_lines
+        self.amesh_lines = np.asarray(mesh_lines)
         self.image.nv[0] = image.shape[0]
         self.image.nv[1] = image.shape[1]
         for i in range(4): self.image.bounds[i] = bounds[i]

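For context on this change: assigning the typed memoryviews straight to the
a* attributes exposed buffer objects rather than arrays to external callers.
np.asarray wraps a buffer in an ndarray view without copying, so the
attributes gain the full ndarray API while writes still reach the sampler's
memory. A rough plain-Python illustration (memoryview stands in for a Cython
typed memoryview):

    import numpy as np

    zbuffer = np.zeros((4, 4), dtype=np.float64)
    view = memoryview(zbuffer)       # stand-in for a Cython typed memoryview
    azbuffer = np.asarray(view)      # ndarray view of the same buffer, no copy

    azbuffer[0, 0] = np.inf
    assert isinstance(azbuffer, np.ndarray)
    assert zbuffer[0, 0] == np.inf   # writes reach the original buffer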

https://bitbucket.org/yt_analysis/yt/commits/24e049078e06/
Changeset:   24e049078e06
User:        atmyers
Date:        2017-05-24 16:42:20+00:00
Summary:     Merge pull request #1411 from ngoldbaum/vr-orientation-fix

only rotate volume rendering images after compositing all sources
Affected #:  3 files

diff -r b477a656ca440f55267d6d556ed6758726272405 -r 24e049078e06a910fd24d8a24faf970196ae178c yt/utilities/lib/image_samplers.pyx
--- a/yt/utilities/lib/image_samplers.pyx
+++ b/yt/utilities/lib/image_samplers.pyx
@@ -119,9 +119,12 @@
         self.image.x_vec = <np.float64_t *> x_vec.data
         self.ay_vec = y_vec
         self.image.y_vec = <np.float64_t *> y_vec.data
-        self.image.zbuffer = self.azbuffer = zbuffer
-        self.image.image_used = self.aimage_used = image_used
-        self.image.mesh_lines = self.amesh_lines = mesh_lines
+        self.image.zbuffer = zbuffer
+        self.azbuffer = np.asarray(zbuffer)
+        self.image.image_used = image_used
+        self.aimage_used = np.asarray(image_used)
+        self.image.mesh_lines = mesh_lines
+        self.amesh_lines = np.asarray(mesh_lines)
         self.image.nv[0] = image.shape[0]
         self.image.nv[1] = image.shape[1]
         for i in range(4): self.image.bounds[i] = bounds[i]

diff -r b477a656ca440f55267d6d556ed6758726272405 -r 24e049078e06a910fd24d8a24faf970196ae178c yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -444,16 +444,17 @@
             self.sampler(brick, num_threads=self.num_threads)
             total_cells += np.prod(brick.my_data[0].shape)
         mylog.debug("Done casting rays")
+        self.current_image = self.finalize_image(
+            camera, self.sampler.aimage)
 
-        self.current_image = self.finalize_image(camera,
-                                                 self.sampler.aimage,
-                                                 call_from_VR=True)
         if zbuffer is None:
-            self.zbuffer = ZBuffer(self.current_image,
-                                   np.full(self.current_image.shape[:2], np.inf))
+            self.zbuffer = ZBuffer(
+                self.current_image,
+                np.full(self.current_image.shape[:2], np.inf))
+
         return self.current_image
 
-    def finalize_image(self, camera, image, call_from_VR=False):
+    def finalize_image(self, camera, image):
         """Parallel reduce the image.
 
         Parameters
@@ -462,17 +463,12 @@
             The camera used to produce the volume rendering image.
         image: :class:`yt.data_objects.image_array.ImageArray` instance
             A reference to an image to fill
-        call_from_vr: boolean, optional
-            Whether or not this is being called from a higher level in the VR
-            interface. Used to set the correct orientation.
         """
         if self._volume is not None:
             image = self.volume.reduce_tree_images(image, camera.lens.viewpoint)
         image.shape = camera.resolution[0], camera.resolution[1], 4
         # If the call is from VR, the image is rotated by 180 to get correct
         # up direction
-        if call_from_VR is True:
-            image = np.rot90(image, k=2)
         if self.transfer_function.grey_opacity is False:
             image[:, :, 3] = 1
         return image
@@ -712,6 +708,7 @@
 
         return self.current_image
 
+
     def finalize_image(self, camera):
         sam = self.sampler
 
@@ -720,11 +717,6 @@
         Ny = camera.resolution[1]
         self.data = sam.aimage[:,:,0].reshape(Nx, Ny)
 
-        # rotate
-        self.data = np.rot90(self.data, k=2)
-        sam.aimage_used = np.rot90(sam.aimage_used, k=2)
-        sam.amesh_lines = np.rot90(sam.amesh_lines, k=2)
-        sam.azbuffer = np.rot90(sam.azbuffer, k=2)
 
     def annotate_mesh_lines(self, color=None, alpha=1.0):
         r"""

diff -r b477a656ca440f55267d6d556ed6758726272405 -r 24e049078e06a910fd24d8a24faf970196ae178c yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -501,7 +501,9 @@
             im = source.render(camera, zbuffer=opaque)
             opaque.rgba = im
 
-        return im
+        # rotate image 180 degrees so orientation agrees with e.g.
+        # a PlotWindow plot
+        return np.rot90(im, k=2)
 
     def add_camera(self, data_source=None, lens_type='plane-parallel',
                    auto=False):

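Taken together, the merged changes mean the image returned by Scene.render()
already has the 180-degree rotation applied, so its orientation agrees with a
PlotWindow plot and callers no longer need their own np.rot90. A usage sketch
(the dataset path is yt's standard IsolatedGalaxy sample data, used here
purely as an example):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sc = yt.create_scene(ds, field=("gas", "density"))

    # the returned ImageArray is now oriented like a PlotWindow plot;
    # no manual np.rot90(im, k=2) is needed before saving or display
    im = sc.render()
    sc.save("volume_render.png")
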
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this message because you have the commit notification service enabled
and are the addressed recipient of this email.


