[yt-svn] commit/yt: chummels: Merged in MatthewTurk/yt (pull request #1598)

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Wed Apr 6 19:53:50 PDT 2016


1 new commit in yt:

https://bitbucket.org/yt_analysis/yt/commits/a0f4cce7a394/
Changeset:   a0f4cce7a394
Branch:      yt
User:        chummels
Date:        2016-04-07 02:53:38+00:00
Summary:     Merged in MatthewTurk/yt (pull request #1598)

OpenGL volume rendering
Affected #:  28 files

diff -r e1b2e0a5997f0cce7ff52547347e9e5214ad0d5c -r a0f4cce7a394e91bbb5e6b541c2232361d5de737 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py
+include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py CONTRIBUTING.rst
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js
@@ -12,4 +12,5 @@
 prune doc/source/reference/api/generated
 prune doc/build
 recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
+recursive-include yt/visualization/volume_rendering/shaders *.fragmentshader *.vertexshader
 prune yt/frontends/_skeleton

diff -r e1b2e0a5997f0cce7ff52547347e9e5214ad0d5c -r a0f4cce7a394e91bbb5e6b541c2232361d5de737 doc/helper_scripts/run_recipes.py
--- a/doc/helper_scripts/run_recipes.py
+++ b/doc/helper_scripts/run_recipes.py
@@ -19,7 +19,7 @@
 CWD = os.getcwd()
 ytcfg["yt", "serialize"] = "False"
 PARALLEL_TEST = {"rockstar_nest": "3"}
-BLACKLIST = []
+BLACKLIST = ["opengl_ipython", "opengl_vr"]
 
 
 def prep_dirs():

diff -r e1b2e0a5997f0cce7ff52547347e9e5214ad0d5c -r a0f4cce7a394e91bbb5e6b541c2232361d5de737 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -303,6 +303,25 @@
 
 .. yt_cookbook:: vol-annotated.py
 
+.. _cookbook-opengl_vr:
+
+Advanced Interactive Volume Rendering
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to manually create all of the components required
+to start the interactive volume renderer.
+
+.. yt_cookbook:: opengl_vr.py
+
+
+Embedding Interactive Volume Rendering
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to embed interactive volume rendering inside
+a Jupyter notebook.
+
+.. yt_cookbook:: opengl_ipython.py
+
 Plotting Streamlines
 ~~~~~~~~~~~~~~~~~~~~
 

diff -r e1b2e0a5997f0cce7ff52547347e9e5214ad0d5c -r a0f4cce7a394e91bbb5e6b541c2232361d5de737 doc/source/cookbook/opengl_ipython.py
--- /dev/null
+++ b/doc/source/cookbook/opengl_ipython.py
@@ -0,0 +1,29 @@
+import yt
+from yt.visualization.volume_rendering.interactive_vr import \
+    SceneGraph, BlockCollection, TrackballCamera
+from yt.visualization.volume_rendering.interactive_loop import \
+    RenderingContext
+from yt.visualization.volume_rendering import glfw_inputhook 
+
+rc = RenderingContext(1280, 960)
+
+scene = SceneGraph()
+collection = BlockCollection()
+
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+
+dd = ds.all_data()
+collection.add_data(dd, "density")
+
+scene.add_collection(collection)
+
+position = (1.0, 1.0, 1.0)
+c = TrackballCamera(position=position, focus=ds.domain_center,
+                    near_plane=0.1)
+
+callbacks = rc.setup_loop(scene, c)
+rl = rc(scene, c, callbacks)
+
+# To make this work from IPython execute:
+#
+# glfw_inputhook.inputhook_manager.enable_gui("glfw", app=rl)

diff -r e1b2e0a5997f0cce7ff52547347e9e5214ad0d5c -r a0f4cce7a394e91bbb5e6b541c2232361d5de737 doc/source/cookbook/opengl_vr.py
--- /dev/null
+++ b/doc/source/cookbook/opengl_vr.py
@@ -0,0 +1,27 @@
+import yt
+from yt.visualization.volume_rendering.interactive_vr import \
+    SceneGraph, BlockCollection, TrackballCamera
+from yt.visualization.volume_rendering.interactive_loop import \
+    RenderingContext
+
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+
+# Create GLFW window
+rc = RenderingContext(1280, 960)
+
+# Create a 3d Texture from all_data()
+collection = BlockCollection()
+dd = ds.all_data()
+collection.add_data(dd, "density")
+
+# Initialize the basic Scene and pass the data
+scene = SceneGraph()
+scene.add_collection(collection)
+
+# Create default camera
+position = (1.0, 1.0, 1.0)
+c = TrackballCamera(position=position, focus=ds.domain_center,
+                    near_plane=0.1)
+
+# Start rendering loop
+rc.start_loop(scene, c)

diff -r e1b2e0a5997f0cce7ff52547347e9e5214ad0d5c -r a0f4cce7a394e91bbb5e6b541c2232361d5de737 doc/source/visualizing/_images/idv.jpg
Binary file doc/source/visualizing/_images/idv.jpg has changed

diff -r e1b2e0a5997f0cce7ff52547347e9e5214ad0d5c -r a0f4cce7a394e91bbb5e6b541c2232361d5de737 doc/source/visualizing/index.rst
--- a/doc/source/visualizing/index.rst
+++ b/doc/source/visualizing/index.rst
@@ -16,6 +16,7 @@
    manual_plotting
    volume_rendering
    unstructured_mesh_rendering
+   interactive_data_visualization
    sketchfab
    mapserver
    streamlines

diff -r e1b2e0a5997f0cce7ff52547347e9e5214ad0d5c -r a0f4cce7a394e91bbb5e6b541c2232361d5de737 doc/source/visualizing/interactive_data_visualization.rst
--- /dev/null
+++ b/doc/source/visualizing/interactive_data_visualization.rst
@@ -0,0 +1,102 @@
+.. _interactive_data_visualization:
+
+Interactive Data Visualization
+==============================
+
+In version 3.3 of yt, an experimental, hardware-accelerated interactive volume
+renderer was introduced.  This interactive renderer is based on OpenGL and
+natively understands adaptive mesh refinement data, which enables
+memory-efficient loading of data onto the GPU.  The data is copied from CPU
+memory onto the GPU as a series of 3D textures, which are then rendered to an
+interactive window.  The window itself is the view from a conceptual "camera",
+which can be rotated, zoomed, and so on.  The color of each displayed pixel is
+computed by a "fragment shader", which is executed for each grid that is
+displayed.  The fragment shaders currently implemented in yt compute either
+the maximum value along each pixel's line of sight or an unweighted
+integration of values along each pixel's line of sight, and then map the
+result to a colormap.  An experimental transfer function shader has also been
+implemented, but it is not yet functioning correctly.  For more information,
+see :ref:`projection-types`.
+
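+As a schematic illustration (this is plain NumPy, not the actual shader code),
+the two currently available shaders reduce the samples along a single pixel's
+line of sight roughly as follows:
+
+.. code-block:: python
+
+    import numpy as np
+
+    # hypothetical samples of a field along one pixel's line of sight
+    samples = np.array([1.0e-28, 3.0e-27, 5.0e-26, 2.0e-27])
+
+    mip_value = samples.max()          # maximum intensity projection shader
+    integrated_value = samples.sum()   # unweighted integration shader
+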
+A comprehensive description of OpenGL volume rendering is beyond the scope
+of this document. However, a more detailed explanation can be found in `this
+guide <https://open.gl/>`_.
+
+Much of the Interactive Data Visualization (IDV) interface is designed to
+mimic the interface available for software volume rendering (see
+:ref:`volume_rendering`), so that in future versions API compatibility may
+lead to greater code reuse, both for scripts that create visualizations and
+for internal visualization objects.
+
+Installation
+^^^^^^^^^^^^
+
+In order to use Interactive Data Visualization (IDV) you need to install
+`PyOpenGL <https://pypi.python.org/pypi/PyOpenGL>`_ and `cyglfw3
+<https://pypi.python.org/pypi/cyglfw3/>`_ along with their respective
+dependencies; e.g. `glfw3 <http://www.glfw.org/>`_ must be installed before
+you can ``pip install cyglfw3``. Please carefully read the installation
+instructions provided on the PyPI pages of both packages.
+
+If you are using conda, ``cyglfw3`` is provided in our conda channel
+(``pyopengl`` is shipped by Continuum already) and can be installed via:
+
+.. code-block:: bash
+
+    conda install -c http://use.yt/with_conda/ cyglfw3 pyopengl
+
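+Once the packages are installed, a quick (purely illustrative) way to check
+that the OpenGL stack is importable is:
+
+.. code-block:: python
+
+    import OpenGL.GL   # provided by PyOpenGL
+    import cyglfw3     # requires the glfw3 system library
+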
+Using the interactive renderer
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can simply pass a dataset to :meth:`~yt.interactive_render`. By default
+it will load all of the data and render the gas density:
+
+.. code-block:: python
+
+    import yt
+    
+    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    yt.interactive_render(ds)
+
+Alternatively, you can provide a data object as the first argument to
+:meth:`~yt.interactive_render` if your dataset is too big to fit in GPU memory:
+
+.. code-block:: python
+
+    import yt
+
+    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    sp = ds.sphere("max", (0.1, "Mpc"))
+
+    cam_pos = ds.arr([0.1, 0.1, 0.1], "Mpc").in_units("code_length")
+    yt.interactive_render(sp, field="pressure", cam_position=cam_pos,
+                          window_size=(512, 512))
+
+A successful call to :meth:`~yt.interactive_render` should create a new window
+called *vol_render*. 
+
+.. image:: _images/idv.jpg
+   :width: 1000
+
+By default it renders a Maximum Intensity Projection of the density field (see
+:ref:`projection-types` for more information). The rendering can be
+dynamically modified using the following keybindings:
+
+1
+   Switch to MIP fragment shader
+2
+   Switch to integration fragment shader
+L
+   Switch between linear and logarithmic scales
+W
+   Zoom in the camera
+S
+   Zoom out the camera
+C
+   Change the colormap
+
+Pressing the *h* key will print all the available key bindings in a terminal window.
+The camera can be moved around by holding the left mouse button while moving the mouse.
+
+A more advanced initialization of the interactive volume renderer can be found
+in :ref:`cookbook-opengl_vr`.

diff -r e1b2e0a5997f0cce7ff52547347e9e5214ad0d5c -r a0f4cce7a394e91bbb5e6b541c2232361d5de737 setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,6 +30,10 @@
         files += glob.glob("%s/*.%s" % (dir_name, ext))
     MAPSERVER_FILES.append((dir_name, files))
 
+SHADERS_DIR = os.path.join("yt", "visualization", "volume_rendering", "shaders")
+SHADERS_FILES = glob.glob(os.path.join(SHADERS_DIR, "*.vertexshader")) + \
+    glob.glob(os.path.join(SHADERS_DIR, "*.fragmentshader"))
+
 VERSION = "3.3.dev0"
 
 if os.path.exists('MANIFEST'):
@@ -399,7 +403,7 @@
     license="BSD",
     zip_safe=False,
     scripts=["scripts/iyt"],
-    data_files=MAPSERVER_FILES,
+    data_files=MAPSERVER_FILES + SHADERS_FILES,
     ext_modules=cython_extensions + extensions
 )
 

diff -r e1b2e0a5997f0cce7ff52547347e9e5214ad0d5c -r a0f4cce7a394e91bbb5e6b541c2232361d5de737 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -159,7 +159,7 @@
 
 from yt.visualization.volume_rendering.api import \
     volume_render, create_scene, ColorTransferFunction, TransferFunction, \
-    off_axis_projection
+    off_axis_projection, interactive_render
 import yt.visualization.volume_rendering.api as volume_rendering
 #    TransferFunctionHelper, MultiVariateTransferFunction
 #    off_axis_projection

diff -r e1b2e0a5997f0cce7ff52547347e9e5214ad0d5c -r a0f4cce7a394e91bbb5e6b541c2232361d5de737 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -199,7 +199,7 @@
         self.unit = unit
         self.units_base = units_base
         YTException.__init__(self)
-        
+
     def __str__(self):
         err = "The unit '%s' cannot be reduced to a single expression within " \
           "the %s base system of units." % (self.unit, self.units_base)
@@ -210,13 +210,13 @@
         self.old_units = old_units
         self.new_units = new_units
         self.base = base
-    
+
     def __str__(self):
         err = "It looks like you're trying to convert between '%s' and '%s'. Try " \
-          "using \"to_equivalent('%s', '%s')\" instead." % (self.old_units, self.new_units, 
+          "using \"to_equivalent('%s', '%s')\" instead." % (self.old_units, self.new_units,
                                                             self.new_units, self.base)
         return err
-    
+
 class YTUfuncUnitError(YTException):
     def __init__(self, ufunc, unit1, unit2):
         self.ufunc = ufunc
@@ -537,3 +537,24 @@
     def __str__(self):
         return 'Dimensionality specified was %s but we need %s' % (
             self.wrong, self.right)
+
+class YTInvalidShaderType(YTException):
+    def __init__(self, source):
+        self.source = source
+
+    def __str__(self):
+        return "Can't identify shader_type for file '%s.'" % (self.source)
+
+class YTUnknownUniformKind(YTException):
+    def __init__(self, kind):
+        self.kind = kind
+
+    def __str__(self):
+        return "Can't determine kind specification for %s" % (self.kind)
+
+class YTUnknownUniformSize(YTException):
+    def __init__(self, size_spec):
+        self.size_spec = size_spec
+
+    def __str__(self):
+        return "Can't determine size specification for %s" % (self.size_spec)

diff -r e1b2e0a5997f0cce7ff52547347e9e5214ad0d5c -r a0f4cce7a394e91bbb5e6b541c2232361d5de737 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -63,7 +63,7 @@
 
     ds : Dataset
         A simulation static output.
-    
+
     Examples
     --------
     >>> a = np.array([1.1, 0.5, 0.5])
@@ -73,23 +73,23 @@
     >>> ppos
     array([ 0.1,  0.5,  0.5])
     """
- 
+
     off = (pos - ds.domain_left_edge) % ds.domain_width
     return ds.domain_left_edge + off
 
 def periodic_dist(a, b, period, periodicity=(True, True, True)):
     r"""Find the Euclidean periodic distance between two sets of points.
-    
+
     Parameters
     ----------
     a : array or list
         Either an ndim long list of coordinates corresponding to a single point
         or an (ndim, npoints) list of coordinates for many points in space.
-    
+
     b : array of list
         Either an ndim long list of coordinates corresponding to a single point
         or an (ndim, npoints) list of coordinates for many points in space.
-    
+
     period : float or array or list
         If the volume is symmetrically periodic, this can be a single float,
         otherwise an array or list of floats giving the periodic size of the
@@ -115,9 +115,9 @@
     if period.size == 1:
         period = np.array([period, period, period])
 
-    if a.shape != b.shape: 
+    if a.shape != b.shape:
         raise RuntimeError("Arrays must be the same shape.")
-    
+
     if period.shape != a.shape and len(a.shape) > 1:
         n_tup = tuple([1 for i in range(a.ndim-1)])
         period = np.tile(np.reshape(period, (a.shape[0],)+n_tup), (1,)+a.shape[1:])
@@ -128,7 +128,7 @@
 
     c = np.empty((2,) + a.shape, dtype="float64")
     c[0,:] = np.abs(a - b)
-    
+
     p_directions = [i for i,p in enumerate(periodicity) if p is True]
     np_directions = [i for i,p in enumerate(periodicity) if p is False]
     for d in p_directions:
@@ -181,25 +181,25 @@
 
 def rotate_vector_3D(a, dim, angle):
     r"""Rotates the elements of an array around an axis by some angle.
-    
+
     Given an array of 3D vectors a, this rotates them around a coordinate axis
     by a clockwise angle. An alternative way to think about it is the
     coordinate axes are rotated counterclockwise, which changes the directions
     of the vectors accordingly.
-    
+
     Parameters
     ----------
     a : array
         An array of 3D vectors with dimension Nx3.
-    
+
     dim : integer
         A integer giving the axis around which the vectors will be rotated.
         (x, y, z) = (0, 1, 2).
-    
+
     angle : float
         The angle in radians through which the vectors will be rotated
         clockwise.
-    
+
     Examples
     --------
     >>> a = np.array([[1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1], [3, 4, 5]])
@@ -210,7 +210,7 @@
     [  1.00000000e+00   6.12323400e-17   1.00000000e+00]
     [  1.00000000e+00  -1.00000000e+00   1.00000000e+00]
     [  4.00000000e+00  -3.00000000e+00   5.00000000e+00]]
-    
+
     """
     mod = False
     if len(a.shape) == 1:
@@ -236,12 +236,11 @@
         return np.dot(R, a.T).T[0]
     else:
         return np.dot(R, a.T).T
-    
 
 def modify_reference_frame(CoM, L, P=None, V=None):
     r"""Rotates and translates data into a new reference frame to make
     calculations easier.
-    
+
     This is primarily useful for calculations of halo data.
     The data is translated into the center of mass frame.
     Next, it is rotated such that the angular momentum vector for the data
@@ -249,18 +248,18 @@
     momentum vector on the data that comes out of this function, it will
     always be along the positive z-axis.
     If the center of mass is re-calculated, it will be at the origin.
-    
+
     Parameters
     ----------
     CoM : array
         The center of mass in 3D.
-    
+
     L : array
         The angular momentum vector.
-    
+
     Optional
     --------
-        
+
     P : array
         The positions of the data to be modified (i.e. particle or grid cell
         postions). The array should be Nx3.
@@ -268,18 +267,18 @@
     V : array
         The velocities of the data to be modified (i.e. particle or grid cell
         velocities). The array should be Nx3.
-    
+
     Returns
     -------
     L : array
         The angular momentum vector equal to [0, 0, 1] modulo machine error.
-    
+
     P : array
         The modified positional data. Only returned if P is not None
-    
+
     V : array
         The modified velocity data. Only returned if V is not None
-    
+
     Examples
     --------
     >>> CoM = np.array([0.5, 0.5, 0.5])
@@ -359,36 +358,36 @@
 
 def compute_rotational_velocity(CoM, L, P, V):
     r"""Computes the rotational velocity for some data around an axis.
-    
+
     This is primarily for halo computations.
     Given some data, this computes the circular rotational velocity of each
     point (particle) in reference to the axis defined by the angular momentum
     vector.
     This is accomplished by converting the reference frame of the center of
     mass of the halo.
-    
+
     Parameters
     ----------
     CoM : array
         The center of mass in 3D.
-    
+
     L : array
         The angular momentum vector.
-    
+
     P : array
         The positions of the data to be modified (i.e. particle or grid cell
         postions). The array should be Nx3.
-    
+
     V : array
         The velocities of the data to be modified (i.e. particle or grid cell
         velocities). The array should be Nx3.
-    
+
     Returns
     -------
     v : array
         An array N elements long that gives the circular rotational velocity
         for each datum (particle).
-    
+
     Examples
     --------
     >>> CoM = np.array([0, 0, 0])
@@ -412,38 +411,38 @@
         temp = np.dot(rp, V[i]) / np.dot(rp, rp) * rp
         res[i] = np.dot(temp, temp)**0.5
     return res
-    
+
 def compute_parallel_velocity(CoM, L, P, V):
     r"""Computes the parallel velocity for some data around an axis.
-    
+
     This is primarily for halo computations.
     Given some data, this computes the velocity component along the angular
     momentum vector.
     This is accomplished by converting the reference frame of the center of
     mass of the halo.
-    
+
     Parameters
     ----------
     CoM : array
         The center of mass in 3D.
-    
+
     L : array
         The angular momentum vector.
-    
+
     P : array
         The positions of the data to be modified (i.e. particle or grid cell
         postions). The array should be Nx3.
-    
+
     V : array
         The velocities of the data to be modified (i.e. particle or grid cell
         velocities). The array should be Nx3.
-    
+
     Returns
     -------
     v : array
         An array N elements long that gives the parallel velocity for
         each datum (particle).
-    
+
     Examples
     --------
     >>> CoM = np.array([0, 0, 0])
@@ -453,7 +452,7 @@
     >>> paraV = compute_parallel_velocity(CoM, L, P, V)
     >>> paraV
     array([10, -1,  1, -1])
-    
+
     """
     # First we translate into the simple coordinates.
     L, P, V = modify_reference_frame(CoM, L, P, V)
@@ -462,34 +461,34 @@
 
 def compute_radial_velocity(CoM, L, P, V):
     r"""Computes the radial velocity for some data around an axis.
-    
+
     This is primarily for halo computations.
     Given some data, this computes the radial velocity component for the data.
     This is accomplished by converting the reference frame of the center of
     mass of the halo.
-    
+
     Parameters
     ----------
     CoM : array
         The center of mass in 3D.
-    
+
     L : array
         The angular momentum vector.
-    
+
     P : array
         The positions of the data to be modified (i.e. particle or grid cell
         postions). The array should be Nx3.
-    
+
     V : array
         The velocities of the data to be modified (i.e. particle or grid cell
         velocities). The array should be Nx3.
-    
+
     Returns
     -------
     v : array
         An array N elements long that gives the radial velocity for
         each datum (particle).
-    
+
     Examples
     --------
     >>> CoM = np.array([0, 0, 0])
@@ -499,7 +498,7 @@
     >>> radV = compute_radial_velocity(CoM, L, P, V)
     >>> radV
     array([ 1.        ,  1.41421356 ,  0.        ,  0.])
-    
+
     """
     # First we translate into the simple coordinates.
     L, P, V = modify_reference_frame(CoM, L, P, V)
@@ -516,34 +515,34 @@
 def compute_cylindrical_radius(CoM, L, P, V):
     r"""Compute the radius for some data around an axis in cylindrical
     coordinates.
-    
+
     This is primarily for halo computations.
     Given some data, this computes the cylindrical radius for each point.
     This is accomplished by converting the reference frame of the center of
     mass of the halo.
-    
+
     Parameters
     ----------
     CoM : array
         The center of mass in 3D.
-    
+
     L : array
         The angular momentum vector.
-    
+
     P : array
         The positions of the data to be modified (i.e. particle or grid cell
         postions). The array should be Nx3.
-    
+
     V : array
         The velocities of the data to be modified (i.e. particle or grid cell
         velocities). The array should be Nx3.
-    
+
     Returns
     -------
     cyl_r : array
         An array N elements long that gives the radial velocity for
         each datum (particle).
-    
+
     Examples
     --------
     >>> CoM = np.array([0, 0, 0])
@@ -553,6 +552,7 @@
     >>> cyl_r = compute_cylindrical_radius(CoM, L, P, V)
     >>> cyl_r
     array([ 1.        ,  1.41421356,  0.        ,  1.41421356])
+
     """
     # First we translate into the simple coordinates.
     L, P, V = modify_reference_frame(CoM, L, P, V)
@@ -560,19 +560,19 @@
     # calculation very easy.
     P[:,2] = 0
     return np.sqrt((P * P).sum(axis=1))
-    
+
 def ortho_find(vec1):
     r"""Find two complementary orthonormal vectors to a given vector.
 
-    For any given non-zero vector, there are infinite pairs of vectors 
-    orthonormal to it.  This function gives you one arbitrary pair from 
+    For any given non-zero vector, there are infinite pairs of vectors
+    orthonormal to it.  This function gives you one arbitrary pair from
     that set along with the normalized version of the original vector.
 
     Parameters
     ----------
     vec1 : array_like
            An array or list to represent a 3-vector.
-        
+
     Returns
     -------
     vec1 : array
@@ -593,30 +593,30 @@
     -----
     Our initial vector is `vec1` which consists of 3 components: `x1`, `y1`,
     and `z1`.  ortho_find determines a vector, `vec2`, which is orthonormal
-    to `vec1` by finding a vector which has a zero-value dot-product with 
+    to `vec1` by finding a vector which has a zero-value dot-product with
     `vec1`.
 
-    .. math:: 
+    .. math::
 
        vec1 \cdot vec2 = x_1 x_2 + y_1 y_2 + z_1 z_2 = 0
 
-    As a starting point, we arbitrarily choose `vec2` to have `x2` = 1, 
+    As a starting point, we arbitrarily choose `vec2` to have `x2` = 1,
     `y2` = 0:
 
-    .. math:: 
+    .. math::
 
-       vec1 \cdot vec2 = x_1 + (z_1 z_2) = 0 
+       vec1 \cdot vec2 = x_1 + (z_1 z_2) = 0
 
        \rightarrow z_2 = -(x_1 / z_1)
 
-    Of course, this will fail if `z1` = 0, in which case, let's say use 
+    Of course, this will fail if `z1` = 0, in which case, let's say use
     `z2` = 1 and `x2` = 0:
 
     .. math::
-    
+
        \rightarrow y_2 = -(z_1 / y_1)
 
-    Similarly, if `y1` = 0, this case will fail, in which case we use 
+    Similarly, if `y1` = 0, this case will fail, in which case we use
     `y2` = 1 and `z2` = 0:
 
     .. math::
@@ -675,7 +675,7 @@
     alone a specified axis.  Check numpy.median for details, as it is
     virtually the same algorithm.
 
-    Returns an array of the quartiles of the array elements [lower quartile, 
+    Returns an array of the quartiles of the array elements [lower quartile,
     upper quartile].
 
     Parameters
@@ -713,9 +713,9 @@
 
     Notes
     -----
-    Given a vector V of length N, the quartiles of V are the 25% and 75% values 
-    of a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/4]`` and 
-    ``3*V_sorted[(N-1)/4]``, when N is odd.  When N is even, it is the average 
+    Given a vector V of length N, the quartiles of V are the 25% and 75% values
+    of a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/4]`` and
+    ``3*V_sorted[(N-1)/4]``, when N is odd.  When N is even, it is the average
     of the two values bounding these values of ``V_sorted``.
 
     Examples
@@ -772,6 +772,260 @@
         result.append(np.mean(sorted[indexer], axis=axis, out=out))
     return np.array(result)
 
+def get_perspective_matrix(fovy, aspect, z_near, z_far):
+    """
+    Given a field of view in degrees, an aspect ratio, and near and far
+    plane distances, this routine computes the transformation matrix
+    corresponding to perspective projection using homogeneous coordinates.
+
+    Parameters
+    ----------
+    fovy : scalar
+        The angle in degrees of the field of view.
+
+    aspect : scalar
+        The aspect ratio of width / height for the projection.
+
+    z_near : scalar
+        The distance of the near plane from the camera.
+
+    z_far : scalar
+        The distance of the far plane from the camera.
+
+    Returns
+    -------
+    persp_matrix : ndarray
+        A new 4x4 2D array. Represents a perspective transformation
+        in homogeneous coordinates. Note that this matrix does not
+        actually perform the projection. After multiplying a 4D
+        vector of the form (x_0, y_0, z_0, 1.0), the point will be
+        transformed to some (x_1, y_1, z_1, w). The final projection
+        is applied by performing a divide by w, that is
+        (x_1/w, y_1/w, z_1/w, w/w). The matrix uses a row-major
+        ordering, rather than the column major ordering typically
+        used by OpenGL.
+
+    Notes
+    -----
+    The usage of 4D homogeneous coordinates is for OpenGL and GPU
+    hardware that automatically performs the divide by w operation.
+    See the following for more details about OpenGL perspective matrices.
+
+    http://www.tomdalling.com/blog/modern-opengl/explaining-homogenous-coordinates-and-projective-geometry/
+    http://www.songho.ca/opengl/gl_projectionmatrix.html
+
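+    Examples
+    --------
+    An illustrative (assumed, not exhaustive) use of the matrix and the
+    subsequent divide by w:
+
+    >>> proj = get_perspective_matrix(60.0, 1.0, 0.1, 100.0)
+    >>> point = np.array([0.5, 0.5, -5.0, 1.0])
+    >>> clip = proj.dot(point)
+    >>> ndc = clip[:3] / clip[3]  # perform the perspective divide by w
+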
+    """
+
+    tan_half_fovy = np.tan(np.radians(fovy) / 2)
+
+    result = np.zeros( (4, 4), dtype = 'float32', order = 'C')
+    #result[0][0] = 1 / (aspect * tan_half_fovy)
+    #result[1][1] = 1 / tan_half_fovy
+    #result[2][2] = - (z_far + z_near) / (z_far - z_near)
+    #result[3][2] = -1
+    #result[2][3] = -(2 * z_far * z_near) / (z_far - z_near)
+
+    f = z_far
+    n = z_near
+
+    t = tan_half_fovy * n
+    b = -t * aspect
+    r = t * aspect
+    l = - t *  aspect
+
+    result[0][0] = (2 * n) / (r - l)
+    result[2][0] = (r + l) / (r - l)
+    result[1][1] = (2 * n) / (t - b)
+    result[1][2] = (t + b) / (t - b)
+    result[2][2] = -(f + n) / (f - n)
+    result[2][3] = -2*f*n/(f - n)
+    result[3][2] = -1
+
+    return result
+
+def get_orthographic_matrix(maxr, aspect, z_near, z_far):
+    """
+    Given a maximum extent, an aspect ratio, and near and far plane
+    distances, this routine computes the transformation matrix corresponding
+    to orthographic projection using homogeneous coordinates.
+
+    Parameters
+    ----------
+    maxr : scalar
+        The maximum extent of the view; should be max(|x|, |y|).
+
+    aspect : scalar
+        The aspect ratio of width / height for the projection.
+
+    z_near : scalar
+        The distance of the near plane from the camera.
+
+    z_far : scalar
+        The distance of the far plane from the camera.
+
+    Returns
+    -------
+    ortho_matrix : ndarray
+        A new 4x4 2D array. Represents an orthographic transformation
+        in homogeneous coordinates. Unlike the perspective matrix above,
+        no perspective divide by w is implied by an orthographic
+        projection. The matrix uses a row-major ordering, rather than the
+        column-major ordering typically used by OpenGL.
+
+    Notes
+    -----
+    The usage of 4D homogeneous coordinates is for consistency with OpenGL
+    and GPU hardware that operates on homogeneous coordinates.
+    See the following for more details about OpenGL projection matrices.
+
+    http://www.scratchapixel.com/lessons/3d-basic-rendering/perspective-and-orthographic-projection-matrix/orthographic-projection-matrix
+    http://www.tomdalling.com/blog/modern-opengl/explaining-homogenous-coordinates-and-projective-geometry/
+    http://www.songho.ca/opengl/gl_projectionmatrix.html
+
+    """
+
+    r = maxr * aspect
+    t = maxr
+    l = -r
+    b = -t
+
+    result = np.zeros( (4, 4), dtype = 'float32', order = 'C')
+    result[0][0] = 2.0 / (r - l)
+    result[1][1] = 2.0 / (t - b)
+    result[2][2] = -2.0 / (z_far - z_near)
+    result[3][3] = 1
+
+    result[3][0] = - (r+l)/(r-l)
+    result[3][1] = -(t+b)/(t-b)
+    result[3][2] = -(z_far + z_near) / (z_far - z_near)
+
+    return result
+
+def get_lookat_matrix(eye, center, up):
+    """
+    Given the position of a camera, the point it is looking at, and
+    an up-direction, this computes the look-at matrix that moves all vectors
+    such that the camera is at the origin of the coordinate system,
+    looking down the negative z-axis.
+
+    Parameters
+    ----------
+    eye : array_like
+        The position of the camera. Must be 3D.
+
+    center : array_like
+        The location that the camera is looking at. Must be 3D.
+
+    up : array_like
+        The direction that is considered up for the camera. Must be
+        3D.
+
+    Returns
+    -------
+    lookat_matrix : ndarray
+        A new 4x4 2D array in homogeneous coordinates. This matrix
+        moves all vectors in the same way required to move the camera
+        to the origin of the coordinate system, with it pointing down
+        the negative z-axis.
+
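+    Examples
+    --------
+    An assumed, illustrative use (not part of the original docstring):
+
+    >>> lookat = get_lookat_matrix([1.0, 1.0, 1.0], [0.0, 0.0, 0.0],
+    ...                            [0.0, 0.0, 1.0])
+    >>> eye_h = np.array([1.0, 1.0, 1.0, 1.0])
+    >>> origin = lookat.dot(eye_h)[:3]  # the camera position maps to the origin
+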
+    """
+
+    eye = np.array(eye)
+    center = np.array(center)
+    up = np.array(up)
+
+    f = (center - eye) / np.linalg.norm(center - eye)
+    s = np.cross(f, up) / np.linalg.norm(np.cross(f, up))
+    u = np.cross(s, f)
+
+    result = np.zeros ( (4, 4), dtype = 'float32', order = 'C')
+
+    result[0][0] = s[0]
+    result[0][1] = s[1]
+    result[0][2] = s[2]
+    result[1][0] = u[0]
+    result[1][1] = u[1]
+    result[1][2] = u[2]
+    result[2][0] =-f[0]
+    result[2][1] =-f[1]
+    result[2][2] =-f[2]
+    result[0][3] =-np.dot(s, eye)
+    result[1][3] =-np.dot(u, eye)
+    result[2][3] = np.dot(f, eye)
+    result[3][3] = 1.0
+    return result
+
+
+def get_translate_matrix(dx, dy, dz):
+    """
+    Given a movement amount for each coordinate, creates a translation
+    matrix that moves the vector by each amount.
+
+    Parameters
+    ----------
+    dx : scalar
+        A translation amount for the x-coordinate
+
+    dy : scalar
+        A translation amount for the y-coordinate
+
+    dz : scalar
+        A translation amount for the z-coordinate
+
+    Returns
+    -------
+    trans_matrix : ndarray
+        A new 4x4 2D array. Represents a translation by dx, dy
+        and dz in each coordinate respectively.
+    """
+    result = np.zeros( (4, 4), dtype = 'float32', order = 'C')
+
+    result[0][0] = 1.0
+    result[1][1] = 1.0
+    result[2][2] = 1.0
+    result[3][3] = 1.0
+
+    result[0][3] = dx
+    result[1][3] = dy
+    result[2][3] = dz
+
+    return result
+
+def get_scale_matrix(dx, dy, dz):
+    """
+    Given a scaling factor for each coordinate, returns a matrix that
+    corresponds to the given scaling amounts.
+
+    Parameters
+    ----------
+    dx : scalar
+        A scaling factor for the x-coordinate.
+
+    dy : scalar
+        A scaling factor for the y-coordinate.
+
+    dz : scalar
+        A scaling factor for the z-coordinate.
+
+    Returns
+    -------
+    scale_matrix : ndarray
+        A new 4x4 2D array. Represents a scaling by dx, dy, and dz
+        in each coordinate respectively.
+    """
+    result = np.zeros( (4, 4), dtype = 'float32', order = 'C')
+
+    result[0][0] = dx
+    result[1][1] = dy
+    result[2][2] = dz
+    result[3][3] = 1
+
+    return result
+
 def get_rotation_matrix(theta, rot_vector):
     """
     Given an angle theta and a 3D vector rot_vector, this routine
@@ -786,11 +1040,11 @@
     rot_vector : array_like
         The axis of rotation.  Must be 3D.
 
-    Returns 
-    ------- 
-    rot_matrix : ndarray 
-         A new 3x3 2D array.  This is the representation of a 
-         rotation of theta radians about rot_vector in the simulation 
+    Returns
+    -------
+    rot_matrix : ndarray
+         A new 3x3 2D array.  This is the representation of a
+         rotation of theta radians about rot_vector in the simulation
          box coordinate frame
 
     See Also
@@ -818,13 +1072,122 @@
     uz = rot_vector[2]
     cost = np.cos(theta)
     sint = np.sin(theta)
-    
+
     R = np.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
                   [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
                   [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
-    
+
     return R
 
+def quaternion_mult(q1, q2):
+    '''
+
+    Multiply two quaternions. The inputs are 4-component numpy arrays
+    in the order [w, x, y, z].
+
+    '''
+    w = q1[0]*q2[0] - q1[1]*q2[1] - q1[2]*q2[2] - q1[3]*q2[3]
+    x = q1[0]*q2[1] + q1[1]*q2[0] + q1[2]*q2[3] - q1[3]*q2[2]
+    y = q1[0]*q2[2] + q1[2]*q2[0] + q1[3]*q2[1] - q1[1]*q2[3]
+    z = q1[0]*q2[3] + q1[3]*q2[0] + q1[1]*q2[2] - q1[2]*q2[1]
+    return np.array([w, x, y, z])
+
+def quaternion_to_rotation_matrix(quaternion):
+    """
+
+    This converts a quaternion representation of an orientation to
+    a rotation matrix. The input is a 4-component numpy array in
+    the order [w, x, y, z], and the output is a 3x3 matrix stored
+    as a 2D numpy array.  We follow the approach in
+    "3D Math Primer for Graphics and Game Development" by
+    Dunn and Parberry.
+
+    """
+
+    w = quaternion[0]
+    x = quaternion[1]
+    y = quaternion[2]
+    z = quaternion[3]
+
+    R = np.empty((3, 3), dtype=np.float64)
+
+    R[0][0] = 1.0 - 2.0*y**2 - 2.0*z**2
+    R[0][1] = 2.0*x*y + 2.0*w*z
+    R[0][2] = 2.0*x*z - 2.0*w*y
+
+    R[1][0] = 2.0*x*y - 2.0*w*z
+    R[1][1] = 1.0 - 2.0*x**2 - 2.0*z**2
+    R[1][2] = 2.0*y*z + 2.0*w*x
+
+    R[2][0] = 2.0*x*z + 2.0*w*y
+    R[2][1] = 2.0*y*z - 2.0*w*x
+    R[2][2] = 1.0 - 2.0*x**2 - 2.0*y**2
+
+    return R
+
+def rotation_matrix_to_quaternion(rot_matrix):
+    '''
+
+    Convert a rotation matrix-based representation of an
+    orientation to a quaternion. The input should be a
+    3x3 rotation matrix, while the output will be a
+    4-component numpy array. We follow the approach in
+    "3D Math Primer for Graphics and Game Development" by
+    Dunn and Parberry.
+
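+    A hedged, illustrative round trip (the quaternion below is an assumed
+    example, not part of the original code):
+
+    >>> q = np.array([np.cos(0.25), np.sin(0.25), 0.0, 0.0])
+    >>> R = quaternion_to_rotation_matrix(q)
+    >>> q_back = rotation_matrix_to_quaternion(R)  # recovers q up to round-off
+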
+    '''
+    m11 = rot_matrix[0][0]
+    m12 = rot_matrix[0][1]
+    m13 = rot_matrix[0][2]
+    m21 = rot_matrix[1][0]
+    m22 = rot_matrix[1][1]
+    m23 = rot_matrix[1][2]
+    m31 = rot_matrix[2][0]
+    m32 = rot_matrix[2][1]
+    m33 = rot_matrix[2][2]
+
+    four_w_squared_minus_1 = m11 + m22 + m33
+    four_x_squared_minus_1 = m11 - m22 - m33
+    four_y_squared_minus_1 = m22 - m11 - m33
+    four_z_squared_minus_1 = m33 - m11 - m22
+    max_index = 0
+    four_max_squared_minus_1 = four_w_squared_minus_1
+    if (four_x_squared_minus_1 > four_max_squared_minus_1):
+        four_max_squared_minus_1 = four_x_squared_minus_1
+        max_index = 1
+    if (four_y_squared_minus_1 > four_max_squared_minus_1):
+        four_max_squared_minus_1 = four_y_squared_minus_1
+        max_index = 2
+    if (four_z_squared_minus_1 > four_max_squared_minus_1):
+        four_max_squared_minus_1 = four_z_squared_minus_1
+        max_index = 3
+
+    max_val = 0.5*np.sqrt(four_max_squared_minus_1 + 1.0)
+    mult = 0.25 / max_val
+
+    if (max_index == 0):
+        w = max_val
+        x = (m23 - m32) * mult
+        y = (m31 - m13) * mult
+        z = (m12 - m21) * mult
+    elif (max_index == 1):
+        x = max_val
+        w = (m23 - m32) * mult
+        y = (m12 + m21) * mult
+        z = (m31 + m13) * mult
+    elif (max_index == 2):
+        y = max_val
+        w = (m31 - m13) * mult
+        x = (m12 + m21) * mult
+        z = (m23 + m32) * mult
+    elif (max_index == 3):
+        z = max_val
+        w = (m12 - m21) * mult
+        x = (m31 + m13) * mult
+        y = (m23 + m32) * mult
+
+    return np.array([w, x, y, z])
+
 def get_ortho_basis(normal):
     xprime = np.cross([0.0,1.0,0.0],normal)
     if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
@@ -858,7 +1221,7 @@
     # The angle (theta) with respect to the normal (J), is the arccos
     # of the dot product of the normal with the normalized coordinate
     # vector.
-    
+
     res_normal = resize_vector(normal, coords)
 
     # check if the normal vector is normalized
@@ -866,11 +1229,11 @@
     res_normal = normalize_vector(res_normal)
 
     tile_shape = [1] + list(coords.shape)[1:]
-    
+
     J = np.tile(res_normal,tile_shape)
 
     JdotCoords = np.sum(J*coords,axis=0)
-    
+
     return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=0)) )
 
 def get_sph_phi(coords, normal):
@@ -881,7 +1244,7 @@
     # normal == z-hat (as is typical), then xprime == x-hat.
     #
     # The angle is then given by the arctan of the ratio of the
-    # yprime-component and the xprime-component of the coordinate 
+    # yprime-component and the xprime-component of the coordinate
     # vector.
 
     normal = normalize_vector(normal)
@@ -896,7 +1259,7 @@
 
     Px = np.sum(Jx*coords,axis=0)
     Py = np.sum(Jy*coords,axis=0)
-    
+
     return np.arctan2(Py,Px)
 
 def get_cyl_r(coords, normal):
@@ -908,12 +1271,12 @@
 
     tile_shape = [1] + list(coords.shape)[1:]
     J = np.tile(res_normal, tile_shape)
-    
+
     JcrossCoords = np.cross(J, coords, axisa=0, axisb=0, axisc=0)
     return np.sqrt(np.sum(JcrossCoords**2, axis=0))
 
 def get_cyl_z(coords, normal):
-    # The dot product of the normal (J) with the coordinate vector 
+    # The dot product of the normal (J) with the coordinate vector
     # gives the cylindrical height.
 
     res_normal = resize_vector(normal, coords)
@@ -922,7 +1285,7 @@
     tile_shape = [1] + list(coords.shape)[1:]
     J = np.tile(res_normal, tile_shape)
 
-    return np.sum(J*coords, axis=0)  
+    return np.sum(J*coords, axis=0)
 
 def get_cyl_theta(coords, normal):
     # This is identical to the spherical phi component
@@ -949,7 +1312,6 @@
 
 def get_cyl_theta_component(vectors, theta, normal):
     # The theta component of a vector is the vector dotted with thetahat
-    
     normal = normalize_vector(normal)
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
@@ -1027,7 +1389,7 @@
         YTArray(np.tile(rprime, tile_shape), "")
         for rprime in (res_xprime, res_yprime, res_zprime))
 
-    
+
     thetahat = Jx*np.cos(theta)*np.cos(phi) + \
                Jy*np.cos(theta)*np.sin(phi) - \
                Jz*np.sin(theta)

diff -r e1b2e0a5997f0cce7ff52547347e9e5214ad0d5c -r a0f4cce7a394e91bbb5e6b541c2232361d5de737 yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -29,3 +29,4 @@
     BoxSource, PointSource, CoordinateVectorSource, GridSource, \
     MeshSource
 from .zbuffer_array import ZBuffer
+from .interactive_vr_helpers import interactive_render

diff -r e1b2e0a5997f0cce7ff52547347e9e5214ad0d5c -r a0f4cce7a394e91bbb5e6b541c2232361d5de737 yt/visualization/volume_rendering/glfw_inputhook.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/glfw_inputhook.py
@@ -0,0 +1,110 @@
+# encoding: utf-8
+"""
+Enable GLFW to be used interactively by setting PyOS_InputHook.
+
+Authors
+-------
+
+* Nicolas P. Rougier
+* Fernando Perez
+"""
+
+#-----------------------------------------------------------------------------
+#  Copyright (C) 2008-2011  The IPython Development Team
+#
+#  Distributed under the terms of the BSD License.  The full license is in
+#  the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+# This has been modified from the Pyglet and GLUT event hooks to work with
+# glfw.
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import os
+import sys
+import time
+
+#-----------------------------------------------------------------------------
+# Platform-dependent imports and functions
+#-----------------------------------------------------------------------------
+
+if os.name == 'posix':
+    import select
+
+    def stdin_ready():
+        infds, outfds, erfds = select.select([sys.stdin],[],[],0)
+        if infds:
+            return True
+        else:
+            return False
+
+elif sys.platform == 'win32':
+    import msvcrt
+
+    def stdin_ready():
+        return msvcrt.kbhit()
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+def create_inputhook_glfw(mgr, render_loop):
+    """Run the GLFW event loop by processing pending events only.
+
+    This keeps processing pending events until stdin is ready.  After
+    processing all pending events, a call to time.sleep is inserted.  This is
+    needed because otherwise CPU usage stays at 100%.  The sleep time should
+    be tuned for best performance.
+    """
+    def inputhook_glfw():
+        # We need to protect against a user pressing Control-C when IPython is
+        # idle and this is running. We trap KeyboardInterrupt and pass.
+        import cyglfw3 as glfw
+        try:
+            t = glfw.GetTime()
+            while not stdin_ready():
+                next(render_loop)
+
+                used_time = glfw.GetTime() - t
+                if used_time > 10.0:
+                    # print 'Sleep for 1 s'  # dbg
+                    time.sleep(1.0)
+                elif used_time > 0.1:
+                    # Few GUI events coming in, so we can sleep longer
+                    # print 'Sleep for 0.05 s'  # dbg
+                    time.sleep(0.05)
+                else:
+                    # Many GUI events coming in, so sleep only very little
+                    time.sleep(0.001)
+        except KeyboardInterrupt:
+            pass
+        return 0
+    return inputhook_glfw
+
+from IPython.lib.inputhook import inputhook_manager, InputHookBase
+
+@inputhook_manager.register('glfw')
+class GLFWInputHook(InputHookBase):
+    def enable(self, app=None):
+        """Enable event loop integration with GLFW.
+
+        Parameters
+        ----------
+        app : ignored
+           Ignored, it's only a placeholder to keep the call signature of all
+           gui activation methods consistent, which simplifies the logic of
+           supporting magics.
+
+        Notes
+        -----
+        This method sets the ``PyOS_InputHook`` for GLFW, which allows
+        GLFW to integrate with terminal-based applications like
+        IPython.
+
+        """
+        inputhook_glfw = create_inputhook_glfw(self.manager, app)
+        self.manager.set_inputhook(inputhook_glfw)
+        return

diff -r e1b2e0a5997f0cce7ff52547347e9e5214ad0d5c -r a0f4cce7a394e91bbb5e6b541c2232361d5de737 yt/visualization/volume_rendering/input_events.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/input_events.py
@@ -0,0 +1,424 @@
+# encoding: utf-8
+"""
+Input event handlers for Interactive Data Visualization
+
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+# This is a part of the experimental Interactive Data Visualization
+
+from collections import defaultdict, namedtuple
+from yt.utilities.math_utils import \
+    get_perspective_matrix, \
+    get_orthographic_matrix
+import OpenGL.GL as GL
+import cyglfw3 as glfw
+import numpy as np
+import matplotlib.cm as cm
+import random
+
+event_registry = {}
+
+GLFWEvent = namedtuple("GLFWEvent", ['window', 'key', 'scancode', 'action',
+                       'mods', 'width', 'height'])
+
+class EventCollection(object):
+    '''Class handling mouse and keyboard events occurring in IDV
+    
+    Parameters
+    ----------
+    scene : :class:`yt.visualization.volume_rendering.interactive_vr.SceneGraph`
+        A current scene object used in the IDV
+
+    camera : :class:`yt.visualization.volume_rendering.interactive_vr.IDVCamera`
+        A current camera object used in the IDV
+    
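+    Examples
+    --------
+    A minimal sketch (assuming ``scene`` and ``camera`` already exist) of
+    wiring registered events to keys:
+
+    >>> events = EventCollection(scene, camera)
+    >>> events.add_key_callback("zoomin", "w")
+    >>> events.add_key_callback("zoomout", "s")
+    >>> events.add_key_callback("cmap_cycle", "c")
+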
+    '''
+    def __init__(self, scene, camera):
+        self.key_callbacks = defaultdict(list)
+        self.mouse_callbacks = defaultdict(list)
+        self.framebuffer_callbacks = []
+        self.render_events = []
+        self.camera = camera
+        self.scene = scene
+        self.draw = True
+
+    def key_call(self, window, key, scancode, action, mods):
+        draw = False
+        event = GLFWEvent(window, key, scancode, action, mods, None, None)
+        for f in self.key_callbacks[key, action, mods]:
+            draw = f(self, event) or draw
+        self.draw = self.draw or draw
+
+    def mouse_call(self, window, key, action, mods):
+        event = GLFWEvent(window, key, None, action, mods, None, None)
+        draw = False
+        for f in self.mouse_callbacks[key, action, mods]:
+            draw = f(self, event) or draw
+        self.draw = self.draw or draw
+
+    def framebuffer_call(self, window, width, height):
+        event = GLFWEvent(window, None, None, None, None, width, height)
+        draw = False
+        for f in self.framebuffer_callbacks:
+            draw = f(self, event) or draw
+        self.draw = self.draw or draw
+
+    def __call__(self, window):
+        event = GLFWEvent(window, None, None, None, None, None, None)
+        draw = False
+        for f in self.render_events:
+            draw = f(self, event) or draw
+        self.draw = self.draw or draw
+
+    def add_render_callback(self, func):
+        self.render_events.append(func)
+
+    def add_key_callback(self, func, key, action = "press", mods = None):
+        self._add_callback(self.key_callbacks, func, key, action, mods)
+
+    def add_mouse_callback(self, func, key, action = "press", mods = None):
+        self._add_callback(self.mouse_callbacks, func, key, action, mods)
+
+    def add_framebuffer_callback(self, func):
+        self.framebuffer_callbacks.append(func)
+
+    def _add_callback(self, d, func, key, action, mods):
+        if not callable(func):
+            func = event_registry[func]
+        if isinstance(key, str):
+            key = getattr(glfw, "KEY_%s" % key.upper())
+        if isinstance(action, str):
+            action = getattr(glfw, action.upper())
+        if not isinstance(mods, tuple):
+            mods = (mods, )
+        mod = 0
+        for m in mods:
+            if isinstance(m, str):
+                m = getattr(glfw, "MOD_%s" % m.upper())
+            elif m is None:
+                m = 0
+            mod |= m
+        # We can allow for multiple
+        d[key, action, mod].append(func)
+
+def register_event(name):
+    def _f(func):
+        event_registry[name] = func
+        return func
+    return _f
+
+ at register_event("framebuffer_size")
+def framebuffer_size_callback(event_coll, event):
+    GL.glViewport(0, 0, event.width, event.height)
+    event_coll.camera.aspect_ratio = float(event.width)/event.height
+    return True
+
+ at register_event("close_window")
+def close_window(event_coll, event):
+    '''Close main window'''
+    glfw.SetWindowShouldClose(event.window, True)
+
+ at register_event("zoomin")
+def zoomin(event_coll, event):
+    '''Zoom in the camera'''
+    camera = event_coll.camera
+    camera.position -= 0.05 * (camera.position - camera.focus) / \
+                np.linalg.norm(camera.position - camera.focus)
+    return True
+
+ at register_event("zoomout")
+def zoomout(event_coll, event):
+    '''Zoom out the camera'''
+    camera = event_coll.camera
+    camera.position += 0.05 * (camera.position - camera.focus) / \
+        np.linalg.norm(camera.position - camera.focus)
+    return True
+
+ at register_event("camera_orto")
+def camera_orto(event_coll, event):
+    '''Change camera to orthographic projection'''
+    camera = event_coll.camera
+    if camera.proj_func == get_orthographic_matrix:
+        return False
+    camera.proj_func = get_orthographic_matrix
+    camera.fov = np.tan(np.radians(camera.fov) / 2.0)
+    return True
+
+ at register_event("camera_proj")
+def camera_proj(event_coll, event):
+    '''Change camera to perspective projection'''
+    camera = event_coll.camera
+    if camera.proj_func == get_perspective_matrix:
+        return False
+    camera.proj_func = get_perspective_matrix
+    camera.fov = np.degrees(np.arctan(camera.fov) * 2.0)
+    return True
+
+
+ at register_event("shader_max")
+def shader_max(event_coll, event):
+    '''Use maximum intensity shader'''
+    print("Changing shader to max(intensity)")
+    scene = event_coll.scene
+    for coll in scene.collections:
+        coll.set_shader("default.v")
+        coll.set_shader("max_intensity.f")
+    scene.set_shader("passthrough.v")
+    scene.set_shader("apply_colormap.f")
+    for collection in scene.collections:
+        collection.set_fields_log(True)
+    scene.update_minmax()
+    GL.glBlendFunc(GL.GL_ONE, GL.GL_ONE)
+    GL.glBlendEquation(GL.GL_MAX)
+    return True
+
+ at register_event("shader_proj")
+def shader_proj(event_coll, event):
+    '''Use projection shader'''
+    print("Changing shader to projection")
+    scene = event_coll.scene
+    for coll in scene.collections:
+        coll.set_shader("default.v")
+        coll.set_shader("projection.f")
+    scene.set_shader("passthrough.v")
+    scene.set_shader("apply_colormap.f")
+    for collection in scene.collections:
+        collection.set_fields_log(False)
+    scene.update_minmax()
+    GL.glBlendFunc(GL.GL_ONE, GL.GL_ONE)
+    GL.glBlendEquation(GL.GL_FUNC_ADD)
+    return True
+
+ at register_event("shader_test")
+def shader_test(event_coll, event):
+    """Use transfer function shader"""
+    print("Changing shader to projection")
+    scene = event_coll.scene
+    for coll in scene.collections:
+        coll.set_shader("default.v")
+        coll.set_shader("transfer_function.f")
+    scene.set_shader("passthrough.v")
+    scene.set_shader("noop.f")
+    for collection in scene.collections:
+        collection.set_fields_log(True)
+    #scene.update_minmax()
+    # https://www.opengl.org/sdk/docs/man/html/glBlendFunc.xhtml
+    GL.glBlendEquationSeparate(GL.GL_FUNC_ADD, GL.GL_FUNC_ADD)
+    GL.glBlendFuncSeparate(GL.GL_ONE, GL.GL_ONE_MINUS_SRC_ALPHA, GL.GL_ONE, GL.GL_ZERO)
+    return True
+
+ at register_event("shader_lines")
+def shader_lines(event_coll, event):
+    print("Changing shader to projection")
+    scene = event_coll.scene
+    for coll in scene.collections:
+        coll.set_shader("default.v")
+        coll.set_shader("drawlines.f")
+    scene.set_shader("passthrough.v")
+    scene.set_shader("noop.f")
+    for collection in scene.collections:
+        collection.set_fields_log(True)
+    #scene.update_minmax()
+    # https://www.opengl.org/sdk/docs/man/html/glBlendFunc.xhtml
+    #GL.glBlendFunc(GL.GL_ONE, GL.GL_DST_ALPHA)
+    #GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
+    #GL.glBlendFunc(GL.GL_ONE_MINUS_SRC_ALPHA, GL.GL_SRC_ALPHA)
+    GL.glBlendFunc(GL.GL_ONE, GL.GL_ONE)
+    GL.glBlendEquation(GL.GL_MAX)
+    return True
+
+
+ at register_event("cmap_cycle")
+def cmap_cycle(event_coll, event):
+    """Change colormap"""
+    cmap = ['algae', 'kamae', 'viridis', 'inferno', 'magma']
+    cmap = cm.get_cmap(random.choice(cmap))
+    event_coll.camera.cmap = np.array(cmap(np.linspace(0, 1, 256)),
+        dtype=np.float32)
+    event_coll.camera.cmap_new = True
+    print("Setting colormap to {}".format(cmap.name))
+    return True
+
+ at register_event("cmap_max_up")
+def cmap_max_up(event_coll, event):
+    """Increase upper bound of colormap"""
+    if event_coll.camera.cmap_log:
+        event_coll.camera.cmap_max += 0.5
+    else:
+        event_coll.camera.cmap_max *= 2.0
+    return True
+
+ at register_event("cmap_max_down")
+def cmap_max_down(event_coll, event):
+    """Decrease upper bound of colormap"""
+    if event_coll.camera.cmap_log:
+        event_coll.camera.cmap_max -= 0.5
+    else:
+        event_coll.camera.cmap_max *= 0.5
+    return True
+
+ at register_event("cmap_min_up")
+def cmap_min_up(event_coll, event):
+    """Increase lower bound of colormap"""
+    if event_coll.camera.cmap_log:
+        event_coll.camera.cmap_min += 0.5
+    else:
+        event_coll.camera.cmap_min *= 2.0
+    return True
+
+ at register_event("cmap_min_down")
+def cmap_min_down(event_coll, event):
+    """Decrease lower bound of colormap"""
+    if event_coll.camera.cmap_log:
+        event_coll.camera.cmap_min -= 0.5
+    else:
+        event_coll.camera.cmap_min *= 0.5
+    return True
+
+ at register_event("cmap_toggle_log")
+def cmap_toggle_log(event_coll, event):
+    """Switch between linear and logarithmic scales"""
+    if event_coll.scene.data_logged:
+        print("Data is logged already, can't toggle scale to linear")
+        return False
+
+    if not event_coll.camera.cmap_log:
+        event_coll.camera.cmap_max = np.log10(event_coll.camera.cmap_max)
+        event_coll.camera.cmap_min = np.log10(event_coll.camera.cmap_min)
+    else:
+        event_coll.camera.cmap_max = 10.0 ** event_coll.camera.cmap_max
+        event_coll.camera.cmap_min = 10.0 ** event_coll.camera.cmap_min
+    event_coll.camera.cmap_log = not event_coll.camera.cmap_log
+    return True
+
+ at register_event("closeup")
+def closeup(event_coll, event):
+    """Change camera position to (0.01, 0.01, 0.01)"""
+    event_coll.camera.position = (0.01, 0.01, 0.01)
+    return True
+
+ at register_event("reset")
+def reset(event_coll, event):
+    """Change camera position to (-1.0, -1.0, -1.0)"""
+    event_coll.camera.position = (-1.0, -1.0, -1.0)
+    return True
+
+ at register_event("print_limits")
+def print_limits(event_coll, event):
+    """Print debug info about scene and camera"""
+    print(event_coll.scene.min_val, event_coll.scene.max_val)
+    print(event_coll.camera.cmap_min, event_coll.camera.cmap_max,
+          event_coll.camera.cmap_log)
+    return False
+
+ at register_event("debug_buffer")
+def debug_buffer(event_coll, event):
+    """Print debug info about framebuffer"""
+    buffer = event_coll.scene._retrieve_framebuffer()
+    print(buffer.min(), buffer.max())
+
+ at register_event("print_help")
+def print_help(event_coll, event):
+    """Print this help"""
+    key_map = {}
+    for key in (a for a in dir(glfw) if a.startswith("KEY")):
+        key_map[glfw.__dict__.get(key)] = key[4:]
+    for cb in (f for f in sorted(event_coll.key_callbacks)
+               if isinstance(f, tuple)):
+        print("%s - %s" % (key_map[cb[0]],
+                           event_coll.key_callbacks[cb][0].__doc__))
+    return False
+
+ at register_event("nplane_closer")
+def nplane_closer(event_coll, event):
+    print("nearplane", event_coll.camera.near_plane)
+    event_coll.camera.near_plane /= 2.0
+    return True
+
+ at register_event("nplane_further")
+def nplane_further(event_coll, event):
+    print("nearplane", event_coll.camera.near_plane)
+    event_coll.camera.near_plane *= 2.0
+    return True
+
+class MouseRotation(object):
+    '''Class translating mouse movements into positions in the OpenGL scene's coordinates'''
+    def __init__(self):
+        self.start = None
+        self.rotation = False
+
+    def start_rotation(self, event_coll, event):
+        start_screen = glfw.GetCursorPos(event.window) # Screen coordinates
+        window_size = glfw.GetWindowSize(event.window)
+
+        norm_x = -1.0 + 2.0 * start_screen[0] / window_size[0]
+        norm_y = 1.0 - 2.0 * start_screen[1] / window_size[1]
+        self.start = (norm_x, norm_y)
+        self.rotation = True
+        return False
+
+    def stop_rotation(self, event_coll, event):
+        end_screen = glfw.GetCursorPos(event.window)
+        window_size = glfw.GetWindowSize(event.window)
+
+        norm_x = -1.0 + 2.0 * end_screen[0] / window_size[0]
+        norm_y = 1.0 - 2.0 * end_screen[1] / window_size[1]
+        end = (norm_x, norm_y)
+
+        event_coll.camera.update_orientation(
+            self.start[0], self.start[1], end[0], end[1])
+        self.rotation = False
+        return True
+
+    def do_rotation(self, event_coll, event):
+        if not self.rotation: return False
+        new_end_screen = glfw.GetCursorPos(event.window)
+        window_size = glfw.GetWindowSize(event.window)
+
+        norm_x = -1.0 + 2.0 * new_end_screen[0] / window_size[0]
+        norm_y = 1.0 - 2.0 * new_end_screen[1] / window_size[1]
+        new_end = (norm_x, norm_y)
+
+        event_coll.camera.update_orientation(
+            self.start[0], self.start[1], new_end[0], new_end[1])
+        self.start = new_end
+        return True
+
+class BlendFuncs(object):
+    '''Class that allows switching between different GL blending functions'''
+    possibilities = (
+        "GL_ZERO", "GL_ONE", "GL_SRC_COLOR", "GL_ONE_MINUS_SRC_COLOR",
+        "GL_DST_COLOR", "GL_ONE_MINUS_DST_COLOR", "GL_SRC_ALPHA",
+        "GL_ONE_MINUS_SRC_ALPHA", "GL_DST_ALPHA", "GL_ONE_MINUS_DST_ALPHA",
+        "GL_CONSTANT_COLOR", "GL_ONE_MINUS_CONSTANT_COLOR",
+        "GL_CONSTANT_ALPHA", "GL_ONE_MINUS_CONSTANT_ALPHA")
+    source_i = 0
+    dest_i = 0
+
+    def next_source(self, event_coll, event):
+        self.source_i = (self.source_i + 1) % len(self.possibilities)
+        s = getattr(GL, self.possibilities[self.source_i])
+        d = getattr(GL, self.possibilities[self.dest_i])
+        print("Setting source to %s and dest to %s" %
+              (self.possibilities[self.source_i], 
+               self.possibilities[self.dest_i]))
+        GL.glBlendFunc(s, d)
+        return True
+
+    def next_dest(self, event_coll, event):
+        self.dest_i = (self.dest_i + 1) % len(self.possibilities)
+        s = getattr(GL, self.possibilities[self.source_i])
+        d = getattr(GL, self.possibilities[self.dest_i])
+        print("Setting source to %s and dest to %s" %
+              (self.possibilities[self.source_i],
+               self.possibilities[self.dest_i]))
+        GL.glBlendFunc(s, d)
+        return True

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.