[yt-svn] commit/yt-doc: 2 new changesets

Bitbucket commits-noreply at bitbucket.org
Thu Aug 2 19:27:22 PDT 2012


2 new commits in yt-doc:


https://bitbucket.org/yt_analysis/yt-doc/changeset/c93a4102daa1/
changeset:   c93a4102daa1
user:        samskillman
date:        2012-08-03 04:02:56
summary:     First pass at VR docs to fulfill issue https://bitbucket.org/yt_analysis/yt/issue/382/volume-rendering-docs
affected #:  3 files

diff -r 02cfc68adcc0694de9cc53f236b43cf984919d3a -r c93a4102daa1a7880a77c6de112faa2140fa2784 source/cookbook/amrkdtree_downsampling.py
--- /dev/null
+++ b/source/cookbook/amrkdtree_downsampling.py
@@ -0,0 +1,60 @@
+## Using AMRKDTree Homogenized Volumes to examine large datasets at lower
+## resolution.
+
+# In this example we will show how to use the AMRKDTree to take a simulation
+# with 8 levels of refinement and only use levels 0-3 to render the dataset.
+
+# We begin by loading up yt, and importing the AMRKDTree
+
+from yt.mods import *
+from yt.utilities.amr_kdtree.api import AMRKDTree
+
+# Load up a dataset and print out the maximum refinement level.
+pf = load('galaxy0030/galaxy0030')
+print pf.h.max_level
+
+kd = AMRKDTree(pf)
+# Print out the total volume of all the bricks
+print kd.count_volume()
+# Print out the number of cells
+print kd.count_cells()
+
+tf = ColorTransferFunction((-30, -22))
+cam = pf.h.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256, tf, volume=kd)
+tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5], colormap='RdBu_r')
+cam.show(clip_ratio=6.0)
+
+# This rendering is okay, but let's say I'd like to improve it, and I don't want
+# to spend the time rendering the high resolution data.  What we can do is
+# generate a low resolution version of the AMRKDTree and pass that in to the
+# camera.  We do this by specifying a maximum refinement level of 3.
+
+kd_low_res = AMRKDTree(pf, l_max=3)
+print kd_low_res.count_volume()
+print kd_low_res.count_cells()
+
+# Now we pass this in as the volume to our camera, and render the snapshot
+# again.
+
+cam.volume = kd_low_res
+cam.show(clip_ratio=6.0)
+
+# This operation was substantially faster.  Now let's modify the low resolution
+# rendering until we find something we like.
+
+tf.clear()
+tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
+              alpha=na.ones(4, dtype='float64'), colormap='RdBu_r')
+cam.show(clip_ratio=6.0)
+
+# This looks better.  Now let's try turning on opacity.
+
+tf.grey_opacity = True
+cam.show(clip_ratio=6.0)
+
+# That seemed to pick out some interesting structures.  Now let's bump up the
+# opacity.
+
+tf.clear()
+tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
+              alpha=10.0*na.ones(4, dtype='float64'), colormap='RdBu_r')
+cam.show(clip_ratio=6.0)
+
+# This looks pretty good; now let's go back to the full resolution AMRKDTree.
+
+cam.volume = kd
+cam.show(clip_ratio=6.0)
+
+# This looks great!
+


diff -r 02cfc68adcc0694de9cc53f236b43cf984919d3a -r c93a4102daa1a7880a77c6de112faa2140fa2784 source/cookbook/opaque_rendering.py
--- /dev/null
+++ b/source/cookbook/opaque_rendering.py
@@ -0,0 +1,63 @@
+## Opaque Volume Rendering
+
+# The new version of yt also features opaque rendering, using grey opacity.
+# For example, this makes blues opaque to red and green.  In this example we
+# will explore how the opacity model you choose changes the appearance of the
+# rendering.
+
+# Here we start by loading up a dataset, in this case galaxy0030.
+
+from yt.mods import *
+
+pf = load("galaxy0030/galaxy0030")
+
+# We start by building a transfer function, and initializing a camera.
+
+tf = ColorTransferFunction((-30, -22))
+cam = pf.h.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256, tf)
+
+# Now let's add some isocontours, and take a snapshot.
+
+tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5], colormap = 'RdBu_r')
+cam.show(clip_ratio=6.0)
+
+# In this case, the default alphas used (na.logspace(-3,0,Nbins)) do not
+# accentuate the outer regions of the galaxy.  Let's start by bringing up the
+# alpha values for each contour to go between 0.1 and 1.0.
+
+tf.clear()
+tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
+              alpha=na.logspace(-1, 0, 4), colormap='RdBu_r')
+cam.show(clip_ratio=6.0)
+
+# Now let's set the grey_opacity to True.  This should make the inner portions
+# start to be obscured.
+
+tf.grey_opacity = True
+cam.show(clip_ratio=6.0)
+
+# That looks pretty good, but let's start bumping up the opacity.
+
+tf.clear()
+tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
+              alpha=10.0*na.ones(4, dtype='float64'), colormap='RdBu_r')
+cam.show(clip_ratio=6.0)
+
+# Let's bump up again to see if we can obscure the inner contour.
+
+tf.clear()
+tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
+              alpha=30.0*na.ones(4, dtype='float64'), colormap='RdBu_r')
+cam.show(clip_ratio=6.0)
+
+# Now we are losing sight of everything.  Let's see if we can obscure the next
+# layer.
+
+tf.clear()
+tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
+              alpha=100.0*na.ones(4, dtype='float64'), colormap='RdBu_r')
+cam.show(clip_ratio=6.0)
+
+# That is very opaque!  Now let's go back and see what it would look like with
+# grey_opacity = False.
+
+tf.grey_opacity = False
+cam.show(clip_ratio=6.0)
+
+# That looks pretty different, but the main thing is that you can see that the
+# inner contours are somewhat visible again.  
+
+


diff -r 02cfc68adcc0694de9cc53f236b43cf984919d3a -r c93a4102daa1a7880a77c6de112faa2140fa2784 source/visualizing/volume_rendering.rst
--- a/source/visualizing/volume_rendering.rst
+++ b/source/visualizing/volume_rendering.rst
@@ -5,12 +5,11 @@
 .. versionadded:: 1.6
 
 Volume rendering, as implemented in yt, is a mechanism by which rays are cast
-through a domain and then integrated along their path through gaseous material.
+through a domain, converting field values to emission and absorption, and producing a final image.
 This provides the ability to create off-axis projections, isocontour images,
-volume emission, and absorption from intervening material.  While the volume
-renderer is still a secondary project, acting as a supplement to the primary
-functionality of ``yt``, it shows promise for exploring data and creating
-*scientifically-informed* visualizations of simulations.
+volume emission, and absorption from intervening material.  The primary goal 
+of the volume rendering in ``yt`` is to provide the ability to make
+*scientifically-informed* visualizations of simulations.  
 
 The volume renderer is implemented in a hybrid of Python and Cython, which is
 Python-like code compiled down to C.  It has been optimized, but it is still a
@@ -18,7 +17,58 @@
 processing units (GPUs).  However, while the rendering engine itself may not
 directly translate to GPU code (OpenCL, CUDA or OpenGL), the Python structures:
 partitioning, transfer functions, display, etc., may be useful in the future
-for transitioning the rendering to the GPU.
+for transitioning the rendering to the GPU.  In addition, this allows users to create
+volume renderings on traditional supercomputing platforms that may not have access to GPUs.
+
+Tutorial
+--------
+.. versionadded:: 1.6
+
+Volume renderings are created by combining three objects: a volume
+homogenization, a transfer function, and a camera object.
+
+#. Find the appropriate bounds for your data.
+#. Create a ColorTransferFunction object.
+#. Create a Camera object, which homogenizes the volume and orients the viewing
+   direction.
+#. Take a snapshot and save the image.
+
+Here is a working example for the *CHANGE THIS NOTE* dataset.
+
+.. code-block:: python
+
+   from yt.mods import *
+
+   # Load your data
+   pf = load("galaxy0030")
+
+   # Choose a field
+   field = 'Density'
+
+   # Find the bounds in log space for your field
+   dd = pf.h.all_data()
+   mi, ma = na.log10(dd.quantities["Extrema"](field)[0])
+
+   # Create a transfer function
+   tf = ColorTransferFunction((mi - 0.5, ma + 0.5))
+   tf.add_layers(8, w=0.01)
+
+   # Set up the camera parameters: center, looking direction, width, resolution
+   c = (pf.domain_right_edge + pf.domain_left_edge)/2.0
+   L = na.array([1.0, 1.0, 1.0])
+   W = 1.0 / pf["unitary"]
+   N = 512
+
+   # Create a camera object
+   cam = pf.h.camera(c, L, W, N, tf)
+
+   # Set up the filename using pf and field.
+   fn = "%s_%s_rendering.png" % (pf, field)
+
+   # Take a snapshot, saving the image to file fn.
+   cam.snapshot(fn)
+
+
 
 Method
 ------
@@ -105,30 +155,7 @@
 Here's a fully functional script that demonstrates how to use the camera
 interface.
 
-.. code-block:: python
-
-   from yt.mods import *
-
-   pf = load("DD1701/DD1701")
-
-   dd = pf.h.all_data()
-   mi, ma = na.log10(dd.quantities["Extrema"]("Density")[0])
-   mi -= 0.1 ; ma += 0.1 # To allow a bit of room at the edges
-
-   tf = ColorTransferFunction((mi, ma))
-   tf.add_layers(8, w=0.01)
-   c = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-   L = na.array([1.0, 1.0, 1.0])
-   W = 0.5 / pf["unitary"]
-
-   N = 512
-
-   cam = pf.h.camera(c, L, W, N, tf)
-   fn = "%s_image.png" % pf
-
-   cam.snapshot(fn)
-
-For another example, see the cookbook :ref:`cookbook-simple_volume_rendering`.
+For an example, see the cookbook :ref:`cookbook-simple_volume_rendering`.
 
 The :class:`~yt.visualization.volume_rendering.camera.StereoPairCamera` object
 has a single primary method,
@@ -186,6 +213,12 @@
 :meth:`~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction.add_gaussian`,
 which will allow you to specify the colors directly.
 
+An alternate way to modify the colormap is to use
+:meth:`~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction.map_to_colormap`,
+where you can map a segment of the transfer function space to an entire
+colormap at a single alpha value.  This is sometimes useful for very opaque
+renderings.
+
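+A minimal sketch of this approach (the ``scale`` keyword acting as the single
+alpha value is an assumption about the call signature; the bounds and dataset
+follow the examples elsewhere in these docs):
+
+.. code-block:: python
+
+   from yt.mods import *
+
+   pf = load("galaxy0030/galaxy0030")
+   tf = ColorTransferFunction((-30, -22))
+   # Map the sub-range [-27.5, -25.5] onto the full 'RdBu_r' colormap at a
+   # single, fairly opaque alpha value.
+   tf.map_to_colormap(-27.5, -25.5, scale=10.0, colormap='RdBu_r')
+   cam = pf.h.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], 0.3 / pf["unitary"],
+                     256, tf)
+   cam.snapshot("map_to_colormap_rendering.png", clip_ratio=6.0)
+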
 See :ref:`cookbook-simple_volume_rendering` for an example usage.
 
 Projection Transfer Function
@@ -334,6 +367,124 @@
 user-friendly.  If you would like to participate in this effort, that would be
 awesome.  See :ref:`contributing-code` for more info.
 
+MPI Parallelization
+-------------------
+Currently the volume renderer is parallelized using MPI to decompose the volume
+by attempting to split up the
+:class:`~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree` in a balanced way.  This
+has two advantages: 
+
+#.  The :class:`~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree`
+    construction is parallelized since each MPI task only needs
+    to know about the part of the tree it will traverse.
+#.  Each MPI task will only read data for the portion of the volume that it
+    has been assigned.
+
+Once the :class:`~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree` has been
+constructed, each MPI task renders until all of its bricks are completed.  At
+that point each MPI task holds a full image plane, and a tree reduction is used
+to construct the final image, alpha blending the partial images together at
+each reduction phase.
+
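+A minimal sketch of a script run this way (assuming the usual yt convention of
+enabling MPI parallelism by launching under ``mpirun`` with the ``--parallel``
+flag):
+
+.. code-block:: python
+
+   # Run with, e.g.:  mpirun -np 4 python parallel_rendering.py --parallel
+   from yt.mods import *
+
+   pf = load("galaxy0030/galaxy0030")
+   tf = ColorTransferFunction((-30, -22))
+   tf.add_layers(4, 0.01, colormap='RdBu_r')
+
+   # The AMRKDTree is decomposed across the MPI tasks when the camera is built.
+   cam = pf.h.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], 0.3 / pf["unitary"],
+                     256, tf)
+
+   # Each task renders its bricks; a tree reduction assembles the final image.
+   im = cam.snapshot("parallel_rendering.png")
+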
+Caveats:
+
+#.  At this time, the :class:`~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree`
+    can only be decomposed over a power-of-two number of MPI tasks.  If the
+    number of tasks is not a power of two, the largest power of two below that
+    number is used, and the remaining cores will be idle.  This issue is being
+    actively addressed by current development.
+#.  Each MPI task, currently, holds the entire image plane.  Therefore when
+    image plane sizes get large (>2048^2), the memory usage can also get large,
+    limiting the number of MPI tasks you can use.  This is also being addressed
+    in current development by using image plane decomposition.
+
+OpenMP Parallelization
+----------------------
+.. versionadded:: 2.4
+
+The volume rendering is also parallelized using the OpenMP interface in Cython.
+While the MPI parallelization is done using domain decomposition, the OpenMP
+threading parallelizes over the rays intersecting a given brick of data.  As
+the average brick size relative to the image plane increases, the parallel
+efficiency increases.
+
+By default, the volume renderer will use the total number of cores available on
+the symmetric multiprocessing (SMP) compute platform.  For example, if you have
+a shiny new laptop with 8 cores, you'll by default launch 8 OpenMP threads.
+The number of threads can be controlled with the ``num_threads`` keyword in
+:meth:`~yt.visualization.volume_rendering.camera.Camera.snapshot`.  You may
+also restrict the number of OpenMP threads used by default by setting the
+environment variable ``OMP_NUM_THREADS``.
+
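+For example (a sketch using the ``num_threads`` keyword named above; the
+dataset and transfer function follow the earlier examples):
+
+.. code-block:: python
+
+   from yt.mods import *
+
+   pf = load("galaxy0030/galaxy0030")
+   tf = ColorTransferFunction((-30, -22))
+   tf.add_layers(4, 0.01, colormap='RdBu_r')
+   cam = pf.h.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], 0.3 / pf["unitary"],
+                     256, tf)
+
+   # Use 4 OpenMP threads to traverse each brick for this snapshot.
+   im = cam.snapshot("openmp_rendering.png", num_threads=4)
+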
+Running in Hybrid MPI + OpenMP
+------------------------------
+.. versionadded:: 2.4
+
+The two methods for volume rendering parallelization can be used together to
+leverage large supercomputing resources.  When choosing how to balance the
+number of MPI tasks vs OpenMP threads, there are a few things to keep in mind.
+For these examples, we will assume you are using ``Nmpi`` MPI tasks and ``Nmp``
+OpenMP threads, on a total of ``P`` cores.  We will assume that the machine has
+``Nnode`` SMP nodes, each with ``cores_per_node`` cores.
+
+#.  For each MPI task, ``num_threads`` (or ``OMP_NUM_THREADS``) OpenMP threads
+    will be used.  Therefore you should usually make sure that
+    ``Nmpi * Nmp = P``, as in the sketch following this list.
+#.  For simulations with many grids/AMRKDTree bricks, you generally want to
+    increase ``Nmpi``.
+#.  For simulations with large image planes (>2048^2), you generally want to
+    decrease ``Nmpi`` and increase ``Nmp``.  This is because, currently, each
+    MPI task stores the entire image plane, and doing so can approach the
+    memory limits of a given SMP node.
+#.  Please make sure you understand the (super)computer topology in terms of
+    the numbers of cores per socket, node, etc. when making these decisions.
+#.  In many cases, when rendering on your laptop or desktop, the default OpenMP
+    threading provides enough of a speedup that it is preferable to launching
+    MPI tasks.
+
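+As a sketch of how the two fit together (assuming 4 MPI tasks with 4 OpenMP
+threads each on 16 cores, and the ``--parallel`` launch convention noted
+above):
+
+.. code-block:: python
+
+   # Launch with, e.g.:  mpirun -np 4 python hybrid_rendering.py --parallel
+   from yt.mods import *
+
+   pf = load("galaxy0030/galaxy0030")
+   tf = ColorTransferFunction((-30, -22))
+   tf.add_layers(4, 0.01, colormap='RdBu_r')
+   cam = pf.h.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], 0.3 / pf["unitary"],
+                     256, tf)
+
+   # Nmpi = 4 (from mpirun) and Nmp = 4 (num_threads), so Nmpi * Nmp = 16 = P.
+   im = cam.snapshot("hybrid_rendering.png", num_threads=4)
+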
+Opacity
+-------
+.. versionadded:: 2.4
+
+There are currently two models for opacity when rendering a volume, which are
+controlled in the ``ColorTransferFunction`` with the keyword
+``grey_opacity=False`` (the default) or ``grey_opacity=True``.  With the
+default, each of the r, g, b channels is only opaque to itself.  This means
+that if a ray carrying some amount of red then encounters material that emits
+blue, the red will still exist and in the end that pixel will be a combination
+of blue and red.  However, if the ``ColorTransferFunction`` is set up with
+``grey_opacity=True``, then blue will be opaque to red, and only the blue
+emission will remain.
+
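+As a compact illustration of the difference (a sketch; the bounds and layers
+mirror the opaque rendering cookbook recipe):
+
+.. code-block:: python
+
+   from yt.mods import *
+
+   pf = load("galaxy0030/galaxy0030")
+   tf = ColorTransferFunction((-30, -22))
+   tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5], colormap='RdBu_r')
+   cam = pf.h.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], 0.3 / pf["unitary"],
+                     256, tf)
+
+   # Default: each channel is opaque only to itself.
+   cam.snapshot("channel_opacity.png", clip_ratio=6.0)
+
+   # Grey opacity: emission in any channel also absorbs the other channels.
+   tf.grey_opacity = True
+   cam.snapshot("grey_opacity.png", clip_ratio=6.0)
+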
+For an in-depth example, please see the cookbook example on opaque renders here: 
+:ref:`cookbook-opaque_rendering`.
+
+Lighting
+--------
+.. versionadded:: 2.4
+
+Lighting can optionally be used in volume renderings by specifying
+``use_light=True`` when the Camera object is created.  If used, one can then
+change the default lighting color and direction by modifying
+``Camera.light_dir`` and ``Camera.light_rgb``.  Lighting works in this context
+by evaluating not only the field value but also its gradient in order to
+compute the emissivity.  This is not the same as casting shadows, but provides
+a way of highlighting the sides of a contour.
+
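+A minimal sketch (the keyword and attributes are those named above; the
+particular direction and color values are illustrative only):
+
+.. code-block:: python
+
+   from yt.mods import *
+
+   pf = load("galaxy0030/galaxy0030")
+   tf = ColorTransferFunction((-30, -22))
+   tf.add_layers(4, 0.01, colormap='RdBu_r')
+
+   # Enable lighting at camera creation, then adjust its direction and color.
+   cam = pf.h.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], 0.3 / pf["unitary"],
+                     256, tf, use_light=True)
+   cam.light_dir = [1.0, 0.0, 0.0]
+   cam.light_rgb = [1.0, 1.0, 1.0]
+   cam.snapshot("lit_rendering.png", clip_ratio=6.0)
+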
+Generating a Homogenized Volume
+-------------------------------
+
+In order to perform a volume rendering, the data must first be decomposed into
+a HomogenizedVolume object.  This structure splits the domain up into
+single-resolution tiles which cover the domain at the highest resolution
+possible for a given point in space.  This means that every point in space is
+mapped to exactly one data point, which receives its values from the highest
+resolution grid that covers that volume.
+
+The creation of these homogenized volumes is done during
+:class:`~yt.visualization.volume_rendering.camera.Camera` object instantiation
+by default.  However, in some cases it is useful to build the homogenized
+volume yourself first and then pass it in to the camera.  A sample usage is
+shown in :ref:`cookbook-amrkdtree_downsampling`.
+
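+A minimal sketch of pre-building the volume (mirroring the downsampling recipe
+added in this changeset):
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.utilities.amr_kdtree.api import AMRKDTree
+
+   pf = load("galaxy0030/galaxy0030")
+   tf = ColorTransferFunction((-30, -22))
+   tf.add_layers(4, 0.01, colormap='RdBu_r')
+
+   # Build the homogenized volume first, keeping only levels 0-3...
+   kd = AMRKDTree(pf, l_max=3)
+   # ...then hand it to the camera instead of letting the camera build its own.
+   cam = pf.h.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], 0.3 / pf["unitary"],
+                     256, tf, volume=kd)
+   cam.snapshot("downsampled_rendering.png", clip_ratio=6.0)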
+
 The Simple Volume Rendering Interface
 -------------------------------------
 .. warning:: This has been removed in yt-2.3.  Please use :ref:`the_camera_interface`.



https://bitbucket.org/yt_analysis/yt-doc/changeset/73d242ca848b/
changeset:   73d242ca848b
user:        samskillman
date:        2012-08-03 04:19:07
summary:     Modifying the example on the vr page, fixing a few links.
affected #:  1 file

diff -r c93a4102daa1a7880a77c6de112faa2140fa2784 -r 73d242ca848b602c17ed12d46e707b80b2202557 source/visualizing/volume_rendering.rst
--- a/source/visualizing/volume_rendering.rst
+++ b/source/visualizing/volume_rendering.rst
@@ -33,42 +33,52 @@
    direction
 #. Take a snapshot and save the image.
 
-Here is a working example for the *CHANGE THIS NOTE* dataset.
+Here is a working example for the IsolatedGalaxy dataset from the 2012 yt workshop.
 
 .. code-block:: python
 
-   from yt.mods import *
-
-   # Load your data
-   pf = load("galaxy0030")
-
-   # Choose a field
-   field = 'Density'
-
-   # Find the bounds in log space for your field
-   dd = pf.h.all_data()
-   mi, ma = na.log10(dd.quantities["Extrema"](field)[0])
-
-   # Create a transfer function
-   tf = ColorTransferFunction((mi - 0.5, ma + 0.5))
-   tf.add_layers(8, w=0.01)
-
-   # Set up the camera parameters: center, looking direction, width, resolution
-   c = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-   L = na.array([1.0, 1.0, 1.0])
-   W = 1.0 / pf["unitary"]
-   N = 512
-
-   # Create a camera object
-   cam = pf.h.camera(c, L, W, N, tf)
-
-   # Set up the filename using pf and field.
-   fn = "%s_%s_rendering.png" % (pf, field)
-
-   # Take a snapshot, saving the image to file fn.
-   cam.snapshot(fn)
-
-
+    from yt.mods import *
+    
+    pf = load("galaxy0030/galaxy0030")
+    # Choose a field
+    field = 'Density'
+    # Do you want the log of the field?
+    use_log = True
+    
+    # Make sure everything is set to act correctly based on use_log
+    if use_log:
+        pf.h  # instantiate the hierarchy so the field info is fully populated
+        pf.field_info[field].take_log = use_log
+    
+    # Find the bounds in log space for your field
+    dd = pf.h.all_data()
+    mi, ma = dd.quantities["Extrema"](field)[0]
+    
+    if use_log:
+        mi,ma = na.log10(mi), na.log10(ma)
+    
+    # Instantiate the ColorTransferFunction.
+    tf = ColorTransferFunction((mi, ma))
+    
+    # Set up the camera parameters: center, looking direction, width, resolution
+    c = (pf.domain_right_edge + pf.domain_left_edge)/2.0
+    L = na.array([1.0, 1.0, 1.0])
+    W = 0.3 / pf["unitary"]
+    N = 256 
+    
+    # Create a camera object
+    cam = pf.h.camera(c, L, W, N, tf)
+    
+    # Set up the filename using pf and field.
+    fn = "%s_%s_rendering.png" % (pf, field)
+    
+    # Now let's add some isocontours, and take a snapshot.
+    tf.add_layers(10, 0.01, colormap='RdBu_r')
+    im = cam.snapshot('test_rendering.png', clip_ratio=6.0)
+    
+    # Take a snapshot, saving the image to file fn.
+    cam.snapshot(fn)
+    
 
 Method
 ------

Repository URL: https://bitbucket.org/yt_analysis/yt-doc/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


