[yt-svn] commit/yt-doc: MatthewTurk: Adjusting the formatting of the new recipes and adding them to the cookbook.

Bitbucket commits-noreply at bitbucket.org
Thu Aug 2 19:37:23 PDT 2012


1 new commit in yt-doc:


https://bitbucket.org/yt_analysis/yt-doc/changeset/c25e14a729d2/
changeset:   c25e14a729d2
user:        MatthewTurk
date:        2012-08-03 04:37:08
summary:     Adjusting the formatting of the new recipes and adding them to the cookbook.
affected #:  3 files

diff -r 73d242ca848b602c17ed12d46e707b80b2202557 -r c25e14a729d2fa69f493675fe7c886a0322e7eeb source/cookbook/amrkdtree_downsampling.py
--- a/source/cookbook/amrkdtree_downsampling.py
+++ b/source/cookbook/amrkdtree_downsampling.py
@@ -1,15 +1,15 @@
-## Using AMRKDTree Homogenized Volumes to examine large datasets at lower
-resolution.
+## Using AMRKDTree Homogenized Volumes to examine large datasets at lower resolution.
 
 # In this example we will show how to use the AMRKDTree to take a simulation
 # with 8 levels of refinement and only use levels 0-3 to render the dataset.
 
 # We begin by loading up yt, and importing the AMRKDTree
 
-from yt.mods import * from yt.utilities.amr_kdtree.api import AMRKDTree
+from yt.mods import *
+from yt.utilities.amr_kdtree.api import AMRKDTree
 
 # Load up a data and print out the maximum refinement level
-pf = load('galaxy0030/galaxy0030') pf.h.max_level
+pf = load('IsolatedGalaxy/galaxy0030/galaxy0030')
 
 kd = AMRKDTree(pf)
 # Print out the total volume of all the bricks
@@ -17,44 +17,52 @@
 # Print out the number of cells
 print kd.count_cells()
 
-tf = ColorTransferFunction((-30, -22)) cam = pf.h.camera([0.5, 0.5, 0.5], [0.2,
-    0.3, 0.4], 0.10, 256, tf, volume=kd) tf.add_layers(4, 0.01, col_bounds =
-            [-27.5,-25.5], colormap = 'RdBu_r') cam.show(clip_ratio=6.0)
+tf = ColorTransferFunction((-30, -22))
+cam = pf.h.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
+                 tf, volume=kd)
+tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5], colormap = 'RdBu_r')
+cam.snapshot("v1.png", clip_ratio=6.0)
 
 # This rendering is okay, but let's say I'd like to improve it, and I don't want
 # to spend the time rendering the high resolution data.  What we can do is
 # generate a low resolution version of the AMRKDTree and pass that in to the
 # camera.  We do this by specifying a maximum refinement level of 3.
 
-kd_low_res = AMRKDTree(pf, l_max=3) print kd_low_res.count_volume() print
-kd_low_res.count_cells()
+kd_low_res = AMRKDTree(pf, l_max=3)
+print kd_low_res.count_volume()
+print kd_low_res.count_cells()
 
 # Now we pass this in as the volume to our camera, and render the snapshot
 # again.
 
-cam.volume = kd_low_res cam.show(clip_ratio=6.0)
+cam.volume = kd_low_res
+cam.snapshot("v4.png", clip_ratio=6.0)
 
 # This operation was substantially faster.  Now let's modify the low resolution
 # rendering until we find something we like.
 
-tf.clear() tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
+tf.clear()
+tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=na.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.show(clip_ratio=6.0)
+cam.snapshot("v2.png", clip_ratio=6.0)
 
 # This looks better.  Now let's try turning on opacity.
 
-tf.grey_opacity=True cam.show(clip_ratio=6.0)
+tf.grey_opacity=True
+cam.snapshot("v4.png", clip_ratio=6.0)
 
 # That seemed to pick out some interesting structures.  Now let's bump up the
 # opacity.
 
-tf.clear() tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
+tf.clear()
+tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=10.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.show(clip_ratio=6.0)
+cam.snapshot("v3.png", clip_ratio=6.0)
 
 # This looks pretty good, now let's go back to the full resolution AMRKDTree
 
-cam.volume = kd cam.show(clip_ratio=6.0)
+cam.volume = kd
+cam.snapshot("v4.png", clip_ratio=6.0)
 
 # This looks great!
 


diff -r 73d242ca848b602c17ed12d46e707b80b2202557 -r c25e14a729d2fa69f493675fe7c886a0322e7eeb source/cookbook/complex_plots.rst
--- a/source/cookbook/complex_plots.rst
+++ b/source/cookbook/complex_plots.rst
@@ -126,3 +126,20 @@
 the command line.
 
 .. yt_cookbook:: zoomin_frames.py
+
+Opaque Volume Rendering
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make semi-opaque volume renderings, but also
+how to step through and try different things to identify the type of volume
+rendering you want.
+
+.. yt_cookbook:: opaque_rendering.py
+
+Downsampling Data for Volume Rendering
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to downsample data in a simulation to speed up
+volume rendering.
+
+.. yt_cookbook:: amrkdtree_downsampling.py


diff -r 73d242ca848b602c17ed12d46e707b80b2202557 -r c25e14a729d2fa69f493675fe7c886a0322e7eeb source/cookbook/opaque_rendering.py
--- a/source/cookbook/opaque_rendering.py
+++ b/source/cookbook/opaque_rendering.py
@@ -9,53 +9,60 @@
 
 from yt.mods import *
 
-pf = load("galaxy0030/galaxy0030")
+pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
 # We start by building a transfer function, and initializing a camera.
 
-tf = ColorTransferFunction((-30, -22)) cam = pf.h.camera([0.5, 0.5, 0.5], [0.2,
-    0.3, 0.4], 0.10, 256, tf)
+tf = ColorTransferFunction((-30, -22))
+cam = pf.h.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256, tf)
 
 # Now let's add some isocontours, and take a snapshot.
 
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5], colormap = 'RdBu_r')
-cam.show(clip_ratio=6.0)
+cam.snapshot("v1.png", clip_ratio=6.0)
 
 # In this case, the default alphas used (na.logspace(-3,0,Nbins)) do not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
 # alpha values for each contour to go between 0.1 and 1.0
 
-tf.clear() tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=na.logspace(0,0,4), colormap = 'RdBu_r') cam.show(clip_ratio=6.0)
+tf.clear()
+tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
+        alpha=na.logspace(0,0,4), colormap = 'RdBu_r')
+cam.snapshot("v2.png", clip_ratio=6.0)
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
 # start to be obscured
 
-tf.grey_opacity = True cam.show(clip_ratio=6.0)
+tf.grey_opacity = True
+cam.snapshot("v3.png", clip_ratio=6.0)
 
 # That looks pretty good, but let's start bumping up the opacity.
 
-tf.clear() tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
+tf.clear()
+tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=10.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.show(clip_ratio=6.0)
+cam.snapshot("v4.png", clip_ratio=6.0)
 
 # Let's bump up again to see if we can obscure the inner contour.
 
-tf.clear() tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
+tf.clear()
+tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=30.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.show(clip_ratio=6.0)
+cam.snapshot("v5.png", clip_ratio=6.0)
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
 # layer
 
-tf.clear() tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
+tf.clear()
+tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=100.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.show(clip_ratio=6.0)
+cam.snapshot("v6.png", clip_ratio=6.0)
 
 # That is very opaque!  Now lets go back and see what it would look like with
 # grey_opacity = False
 
-tf.grey_opacity=False cam.show(clip_ratio=6.0)
+tf.grey_opacity=False
+cam.snapshot("v7.png", clip_ratio=6.0)
 
 # That looks pretty different, but the main thing is that you can see that the
 # inner contours are somewhat visible again.

Repository URL: https://bitbucket.org/yt_analysis/yt-doc/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list