[yt-svn] commit/yt: 4 new changesets

commits-noreply at bitbucket.org
Wed Jun 1 13:28:46 PDT 2016


4 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/11a403f619f9/
Changeset:   11a403f619f9
Branch:      yt
User:        jisuoqing
Date:        2016-05-17 17:51:44+00:00
Summary:     Bug fix and improvement in stereo-spherical-lens
Affected #:  2 files

diff -r 3f50767dec553868bf966a8e027f308f545e84c2 -r 11a403f619f9c2e428ad01c5cd020236db62fbf4 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1431,7 +1431,7 @@
 
     This is a wrapper around np.hstack that preserves units.
     """
-    v = np.vstack(arrs)
+    v = np.hstack(arrs)
     v = validate_numpy_wrapper_units(v, arrs)
     return v
 
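For context: uhstack is yt's unit-preserving analogue of np.hstack (its docstring, shown above, already said so), but before this changeset it delegated to np.vstack and therefore stacked along the wrong axis. A minimal shape-only sketch of the difference, in plain NumPy with assumed illustrative shapes (the yt wrapper additionally re-attaches units via validate_numpy_wrapper_units):

import numpy as np

# Two half-height per-eye buffers (illustrative shapes).
left = np.zeros((512, 256, 3))
right = np.zeros((512, 256, 3))

# np.vstack joins along axis 0 -> side-by-side frames.
assert np.vstack([left, right]).shape == (1024, 256, 3)

# np.hstack joins along axis 1 -> over/under frames, which is the
# layout the stereo-spherical lens relies on after this changeset.
assert np.hstack([left, right]).shape == (512, 512, 3)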

diff -r 3f50767dec553868bf966a8e027f308f545e84c2 -r 11a403f619f9c2e428ad01c5cd020236db62fbf4 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -18,7 +18,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
 from yt.data_objects.image_array import ImageArray
-from yt.units.yt_array import unorm, uvstack
+from yt.units.yt_array import unorm, uvstack, uhstack
 from yt.utilities.math_utils import get_rotation_matrix
 import numpy as np
 
@@ -736,13 +736,13 @@
         if self.disparity is None:
             self.disparity = camera.width[0] / 1000.
 
-        single_resolution_x = int(np.floor(camera.resolution[0]) / 2)
-        px = np.linspace(-np.pi, np.pi, single_resolution_x,
+        single_resolution_y = int(np.floor(camera.resolution[1]) / 2)
+        px = np.linspace(-np.pi, np.pi, camera.resolution[0],
                          endpoint=True)[:, None]
-        py = np.linspace(-np.pi/2., np.pi/2., camera.resolution[1],
+        py = np.linspace(-np.pi/2., np.pi/2., single_resolution_y,
                          endpoint=True)[None, :]
 
-        vectors = np.zeros((single_resolution_x, camera.resolution[1], 3),
+        vectors = np.zeros((camera.resolution[0], single_resolution_y, 3),
                            dtype='float64', order='C')
         vectors[:, :, 0] = np.cos(px) * np.cos(py)
         vectors[:, :, 1] = np.sin(px) * np.cos(py)
@@ -755,29 +755,34 @@
         # Rescale the ray to be long enough to cover the entire domain
         vectors = vectors * max_length
 
-        vectors2 = np.zeros((single_resolution_x, camera.resolution[1], 3),
+        R1 = get_rotation_matrix(0.5*np.pi, [1, 0, 0])
+        R2 = get_rotation_matrix(0.5*np.pi, [0, 0, 1])
+        uv = np.dot(R1, camera.unit_vectors)
+        uv = np.dot(R2, uv)
+
+        vectors.reshape((camera.resolution[0]*single_resolution_y, 3))
+        vectors = np.dot(vectors, uv)
+        vectors.reshape((camera.resolution[0], single_resolution_y, 3))
+
+        vectors2 = np.zeros((camera.resolution[0], single_resolution_y, 3),
                             dtype='float64', order='C')
-        vectors2[:, :, 0] = -np.sin(px) * np.ones((1, camera.resolution[1]))
-        vectors2[:, :, 1] = np.cos(px) * np.ones((1, camera.resolution[1]))
+        vectors2[:, :, 0] = -np.sin(px) * np.ones((1, single_resolution_y))
+        vectors2[:, :, 1] = np.cos(px) * np.ones((1, single_resolution_y))
         vectors2[:, :, 2] = 0
 
+        vectors2.reshape((camera.resolution[0]*single_resolution_y, 3))
+        vectors2 = np.dot(vectors2, uv)
+        vectors2.reshape((camera.resolution[0], single_resolution_y, 3))
+
         positions = np.tile(
-            camera.position, single_resolution_x * camera.resolution[1])
+            camera.position, camera.resolution[0] * single_resolution_y)
         positions = positions.reshape(
-            single_resolution_x, camera.resolution[1], 3)
+            camera.resolution[0], single_resolution_y, 3)
 
         # The left and right are switched here since VR is in LHS.
         positions_left = positions + vectors2 * self.disparity
         positions_right = positions + vectors2 * (-self.disparity)
 
-        R1 = get_rotation_matrix(0.5*np.pi, [1, 0, 0])
-        R2 = get_rotation_matrix(0.5*np.pi, [0, 0, 1])
-        uv = np.dot(R1, camera.unit_vectors)
-        uv = np.dot(R2, uv)
-        vectors.reshape((single_resolution_x*camera.resolution[1], 3))
-        vectors = np.dot(vectors, uv)
-        vectors.reshape((single_resolution_x, camera.resolution[1], 3))
-
         if render_source.zbuffer is not None:
             image = render_source.zbuffer.rgba
         else:
@@ -785,8 +790,8 @@
 
         dummy = np.ones(3, dtype='float64')
 
-        vectors_comb = uvstack([vectors, vectors])
-        positions_comb = uvstack([positions_left, positions_right])
+        vectors_comb = uhstack([vectors, vectors])
+        positions_comb = uhstack([positions_left, positions_right])
 
         image.shape = (camera.resolution[0], camera.resolution[1], 4)
         vectors_comb.shape = (camera.resolution[0], camera.resolution[1], 3)

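The net effect of the lens.py hunks above: the stereo-spherical lens used to halve the horizontal resolution (single_resolution_x) and join the two eye images side by side with uvstack; it now halves the vertical resolution (single_resolution_y) and joins them top/bottom with uhstack, the over/under layout expected for VR playback. Two incidental notes on the moved code: int(np.floor(camera.resolution[1]) / 2) keeps the original parenthesis placement, so the floor is taken before the halving (equivalent to resolution[1] // 2 for integer resolutions), and the bare vectors.reshape(...) calls are no-ops whose results are discarded; this is harmless, since np.dot contracts over the trailing axis of the 3-D array either way. A minimal sketch of the shape bookkeeping, assuming an even [512, 512] resolution:

import numpy as np

Nx, Ny = 512, 512        # camera.resolution (assumed values)
half_y = Ny // 2         # single_resolution_y in the diff above

# One ray-direction grid per eye, half the final frame height each.
per_eye = np.zeros((Nx, half_y, 3))

# Rotating every ray by a 3x3 matrix works directly on the 3-D array:
# np.dot sums over the trailing axis, so no reshape is needed.
R = np.eye(3)
rotated = np.dot(per_eye, R)
assert rotated.shape == (Nx, half_y, 3)

# Joining two eye grids along axis 1 restores the full (Nx, Ny) frame.
combined = np.hstack([rotated, rotated])
assert combined.shape == (Nx, Ny, 3)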

https://bitbucket.org/yt_analysis/yt/commits/b802a9f3dc41/
Changeset:   b802a9f3dc41
Branch:      yt
User:        jisuoqing
Date:        2016-05-17 22:51:51+00:00
Summary:     Update tests and docs
Affected #:  2 files

diff -r 11a403f619f9c2e428ad01c5cd020236db62fbf4 -r b802a9f3dc41a8f200b8ea2de1f27a3fb7d851e2 doc/source/cookbook/various_lens.py
--- a/doc/source/cookbook/various_lens.py
+++ b/doc/source/cookbook/various_lens.py
@@ -85,6 +85,7 @@
 cam = sc.add_camera(ds, lens_type='spherical')
 # Set the size ratio of the final projection to be 2:1, since spherical lens
 # will generate the final image with length of 2*pi and height of pi.
+# Recommended resolution for YouTube 360-degree videos is [3840, 2160]
 cam.resolution = [500, 250]
 # Standing at (x=0.4, y=0.5, z=0.5), we look in all the radial directions
 # from this point in spherical coordinate.
@@ -99,9 +100,11 @@
 
 # Stereo-spherical lens
 cam = sc.add_camera(ds, lens_type='stereo-spherical')
-# Set the size ratio of the final projection to be 4:1, since spherical-perspective lens
-# will generate the final image with both left-eye and right-eye ones jointed together.
-cam.resolution = [1000, 250]
+# Set the size ratio of the final projection to be 1:1, since the stereo-spherical
+# lens will generate the final image with the left-eye and right-eye views joined
+# together, with the left-eye image on top and the right-eye image on the bottom.
+# Recommended resolution for YouTube virtual reality videos is [3840, 2160]
+cam.resolution = [500, 500]
 cam.position = ds.arr([0.4, 0.5, 0.5], 'code_length')
 cam.switch_orientation(normal_vector=normal_vector,
                        north_vector=north_vector)

diff -r 11a403f619f9c2e428ad01c5cd020236db62fbf4 -r b802a9f3dc41a8f200b8ea2de1f27a3fb7d851e2 yt/visualization/volume_rendering/tests/test_lenses.py
--- a/yt/visualization/volume_rendering/tests/test_lenses.py
+++ b/yt/visualization/volume_rendering/tests/test_lenses.py
@@ -116,7 +116,7 @@
         w = self.ds.arr(w, 'code_length')
         sc = Scene()
         cam = sc.add_camera(self.ds, lens_type='stereo-spherical')
-        cam.resolution = [1024, 256]
+        cam.resolution = [512, 512]
         cam.position = self.ds.arr(np.array([0.6, 0.5, 0.5]), 'code_length')
         vol = VolumeSource(self.ds, field=self.field)
         tf = vol.transfer_function

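Putting the updated cookbook settings together, a minimal end-to-end script for the new over/under output might look like the sketch below. The dataset path is a placeholder, the orientation vectors are assumed values, and sc.render()/sc.save() are the standard Scene calls not shown in the diffs above:

import yt
from yt.visualization.volume_rendering.api import Scene, VolumeSource

ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')  # placeholder dataset

sc = Scene()
sc.add_source(VolumeSource(ds, field=('gas', 'density')))

cam = sc.add_camera(ds, lens_type='stereo-spherical')
cam.resolution = [500, 500]   # 1:1 -- left eye on top, right eye on bottom
cam.position = ds.arr([0.4, 0.5, 0.5], 'code_length')
cam.switch_orientation(normal_vector=[1., 0., 0.],
                       north_vector=[0., 0., 1.])

sc.render()
sc.save('lens_stereo-spherical.png')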

https://bitbucket.org/yt_analysis/yt/commits/5d9db5ac17e7/
Changeset:   5d9db5ac17e7
Branch:      yt
User:        jisuoqing
Date:        2016-05-25 19:11:21+00:00
Summary:     Add docs for stereo-spherical lens
Affected #:  1 file

diff -r b802a9f3dc41a8f200b8ea2de1f27a3fb7d851e2 -r 5d9db5ac17e79e66d7a1ca1e7d150beb000b525d doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -31,8 +31,8 @@
 as grid or continent lines, and then to render a production-quality
 visualization.  By changing the "lens" used, a single camera path can output
 images suitable for planetarium domes, immersive and head tracking systems
-(such as the Oculus Rift or recent "spherical" movie viewers such as the
-mobile YouTube app), as well as standard screens.
+(such as the Oculus Rift or recent 360-degree/virtual reality movie viewers
+such as the mobile YouTube app), as well as standard screens.
 
 .. image:: _images/scene_diagram.svg
    :width: 50%
@@ -327,13 +327,19 @@
 
 The :class:`~yt.visualization.volume_rendering.lens.SphericalLens` produces
 a cylindrical-spherical projection.  Movies rendered in this way can be
-displayed in head-tracking devices (e.g. Oculus Rift) or in YouTube 360 view
-(for more information see `the YouTube help
-<https://support.google.com/youtube/answer/6178631?hl=en>`, but it's a
-simple matter of running a script on an encoded movie file.)
+displayed as YouTube 360-degree videos (for more information see
+`the YouTube help: Upload 360-degree videos
+<https://support.google.com/youtube/answer/6178631?hl=en>`_).
 :class:`~yt.visualization.volume_rendering.lens.StereoSphericalLens`
 is identical to :class:`~yt.visualization.volume_rendering.lens.SphericalLens`
-but it produces two images from nearby camera positions for use in 3D viewing.
+but it produces two images from nearby camera positions for virtual reality
+movies, which can be displayed in head-tracking devices (e.g. Oculus Rift)
+or in the mobile YouTube app with Google Cardboard (for more information
+see `the YouTube help: Upload virtual reality videos
+<https://support.google.com/youtube/answer/6316263?hl=en>`_).
+`This virtual reality video
+<https://youtu.be/ZYWY53X7UQE>`_ on YouTube is an example produced with
+:class:`~yt.visualization.volume_rendering.lens.StereoSphericalLens`.
 
 .. _annotated-vr-example:
 

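For readers of the docs hunk above: the "cylindrical-spherical projection" is the standard equirectangular mapping, and it is exactly what the lens.py code earlier in this email computes. Longitude px spans [-pi, pi] across the image width, latitude py spans [-pi/2, pi/2] across the height, and each pixel gets a unit ray direction on the sphere. A standalone sketch (the z-component is assumed to follow the same pattern as the two components visible in the diff):

import numpy as np

nx, ny = 500, 250   # 2:1 frame, as in the spherical-lens cookbook example
px = np.linspace(-np.pi, np.pi, nx, endpoint=True)[:, None]
py = np.linspace(-np.pi/2., np.pi/2., ny, endpoint=True)[None, :]

# Unit ray direction for every output pixel.
vectors = np.zeros((nx, ny, 3))
vectors[:, :, 0] = np.cos(px) * np.cos(py)
vectors[:, :, 1] = np.sin(px) * np.cos(py)
vectors[:, :, 2] = np.sin(py)

# Every direction has unit length.
assert np.allclose(np.linalg.norm(vectors, axis=2), 1.0)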

https://bitbucket.org/yt_analysis/yt/commits/c117d528f18e/
Changeset:   c117d528f18e
Branch:      yt
User:        MatthewTurk
Date:        2016-06-01 20:28:37+00:00
Summary:     Merged in jisuoqing/yt (pull request #2182)

[bugfix] Bug fix and improvement for generating Google Cardboard VR in StereoSphericalLens
Affected #:  5 files


Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
