[yt-svn] commit/yt: 22 new changesets

commits-noreply at bitbucket.org
Wed May 18 11:24:02 PDT 2016


22 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/d801d4e53082/
Changeset:   d801d4e53082
Branch:      yt
User:        atmyers
Date:        2016-03-20 21:39:06+00:00
Summary:     do not print a warning about Embree not being set up unless an Embree test compile fails
Affected #:  1 file

diff -r 588d7db87104a021b08a9169b15a092f03c61ed3 -r d801d4e53082aea7905511cb3ef4cc00a6e5b5ab setupext.py
--- a/setupext.py
+++ b/setupext.py
@@ -76,17 +76,58 @@
     '''
 
     rd = os.environ.get('EMBREE_DIR')
-    if rd is not None:
-        return rd
-    print("EMBREE_DIR not set. Attempting to read embree.cfg")
+    if rd is None:
+        try:
+            rd = open("embree.cfg").read().strip()
+        except IOError:
+            rd = '/usr/local'
+
+    fail_msg = "Pyembree is installed, but I could not compile Embree test code. \n" + \
+               "Attempted to find Embree headers in %s. \n" % rd + \
+               "If this is not correct, please set your correct embree location \n" + \
+               "using EMBREE_DIR environment variable or your embree.cfg file. \n" + \
+               "Please see http://yt-project.org/docs/dev/visualizing/unstructured_mesh_rendering.html " + \
+               "for more information."
+
+    # Create a temporary directory
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+
     try:
-        rd = open("embree.cfg").read().strip()
-        return rd
-    except IOError:
-        print("Reading Embree location from embree.cfg failed.")
-        print("If compilation fails, please place the base directory")
-        print("of your Embree install in embree.cfg and restart.")
-        return '/usr/local'
+        os.chdir(tmpdir)
+
+        # Get compiler invocation
+        compiler = os.getenv('CXX', 'c++')
+        compiler = compiler.split(' ')
+
+        # Attempt to compile a test script.
+        filename = r'test.cpp'
+        file = open(filename, 'wt', 1)
+        file.write(
+            '#include "embree2/rtcore.h"\n'
+            'int main() {\n'
+            'return 0;\n'
+            '}'
+        )
+        file.flush()
+        with open(os.devnull, 'w') as fnull:
+            exit_code = subprocess.call(compiler + ['-I%s/include/' % rd, filename],
+                             stdout=fnull, stderr=fnull)
+
+        # Clean up
+        file.close()
+
+    except OSError:
+        # The compiler could not be invoked; treat this as a failed probe
+        # so the message below is printed exactly once.
+        exit_code = 1
+
+    finally:
+        os.chdir(curdir)
+        shutil.rmtree(tmpdir)
+
+    if exit_code != 0:
+        print(fail_msg)
+
+    return rd
 
 
 def get_mercurial_changeset_id(target_dir):

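The probe in this changeset is a reusable pattern: rather than trusting a configured path, test-compile a throwaway source file against it and only warn when that fails. A minimal standalone sketch of the same idea (the function name embree_headers_compile is illustrative, not part of yt):

    import os
    import shutil
    import subprocess
    import tempfile

    def embree_headers_compile(embree_dir):
        # Build a throwaway C++ file in a temp directory and try to
        # compile it with the Embree headers on the include path.
        tmpdir = tempfile.mkdtemp()
        curdir = os.getcwd()
        exit_code = 1
        try:
            os.chdir(tmpdir)
            compiler = os.getenv('CXX', 'c++').split(' ')
            with open('test.cpp', 'w') as f:
                f.write('#include "embree2/rtcore.h"\n'
                        'int main() { return 0; }\n')
            with open(os.devnull, 'w') as fnull:
                exit_code = subprocess.call(
                    compiler + ['-I%s/include/' % embree_dir, 'test.cpp'],
                    stdout=fnull, stderr=fnull)
        except OSError:
            pass  # the compiler itself could not be invoked
        finally:
            os.chdir(curdir)
            shutil.rmtree(tmpdir)
        return exit_code == 0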

https://bitbucket.org/yt_analysis/yt/commits/22159764d57b/
Changeset:   22159764d57b
Branch:      yt
User:        atmyers
Date:        2016-03-20 21:45:58+00:00
Summary:     update install instructions, and give an example embree.cfg file
Affected #:  1 file

diff -r d801d4e53082aea7905511cb3ef4cc00a6e5b5ab -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -14,7 +14,7 @@
 
 .. code-block:: bash
 
-    conda install -c http://use.yt/with_conda/ yt=3.3_dev
+    conda install -c http://use.yt/with_conda/ yt
 
 If you want to install from source, you can use the ``get_yt.sh`` script.
 Be sure to set the INST_YT_SOURCE and INST_UNSTRUCTURED flags to 1 at the 
@@ -73,7 +73,13 @@
 
 as usual. Finally, if you create a file called embree.cfg in the yt-hg directory with
 the location of the embree installation, the setup script will find this and use it, 
-provided EMBREE_DIR is not set. We recommend one of the later two methods, especially
+provided EMBREE_DIR is not set. An example embree.cfg file could look like this:
+
+.. code-block:: bash
+
+   /opt/local/
+
+We recommend one of the latter two methods, especially
 if you plan on re-compiling the cython extensions regularly. Note that none of this is
 necessary if you installed embree into a location that is in your default path, such
 as /usr/local.

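The lookup order documented here (and implemented in the previous changeset) reduces to a few lines of Python; a sketch, with locate_embree as an illustrative name rather than a yt function:

    import os

    def locate_embree(default='/usr/local'):
        # EMBREE_DIR wins; embree.cfg is consulted only when it is unset,
        # and /usr/local is the fallback when neither is present.
        rd = os.environ.get('EMBREE_DIR')
        if rd is None:
            try:
                rd = open('embree.cfg').read().strip()
            except IOError:
                rd = default
        return rd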

https://bitbucket.org/yt_analysis/yt/commits/a4624d509d5e/
Changeset:   a4624d509d5e
Branch:      yt
User:        atmyers
Date:        2016-04-09 18:18:40+00:00
Summary:     merging.
Affected #:  222 files

diff -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 -r a4624d509d5e9736b8153b22dc40c69eb2cdb792 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py
+include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py CONTRIBUTING.rst
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js
@@ -12,4 +12,5 @@
 prune doc/source/reference/api/generated
 prune doc/build
 recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
+recursive-include yt/visualization/volume_rendering/shaders *.fragmentshader *.vertexshader
 prune yt/frontends/_skeleton

diff -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 -r a4624d509d5e9736b8153b22dc40c69eb2cdb792 doc/cheatsheet.tex
--- a/doc/cheatsheet.tex
+++ b/doc/cheatsheet.tex
@@ -7,12 +7,12 @@
 
 % To make this come out properly in landscape mode, do one of the following
 % 1.
-%  pdflatex latexsheet.tex
+%  pdflatex cheatsheet.tex
 %
 % 2.
-%  latex latexsheet.tex
-%  dvips -P pdf  -t landscape latexsheet.dvi
-%  ps2pdf latexsheet.ps
+%  latex cheatsheet.tex
+%  dvips -P pdf  -t landscape cheatsheet.dvi
+%  ps2pdf cheatsheet.ps
 
 
 % If you're reading this, be prepared for confusion.  Making this was
@@ -45,7 +45,7 @@
 
 % Turn off header and footer
 \pagestyle{empty}
- 
+
 
 % Redefine section commands to use less space
 \makeatletter
@@ -117,26 +117,26 @@
 including a list of the available flags.
 
 \texttt{iyt}\textemdash\ Load yt and IPython. \\
-\texttt{yt load} {\it dataset}   \textemdash\ Load a single dataset.  \\
+\texttt{yt load} \textit{dataset}   \textemdash\ Load a single dataset.  \\
 \texttt{yt help} \textemdash\ Print yt help information. \\
-\texttt{yt stats} {\it dataset} \textemdash\ Print stats of a dataset. \\
+\texttt{yt stats} \textit{dataset} \textemdash\ Print stats of a dataset. \\
 \texttt{yt update} \textemdash\ Update yt to most recent version.\\
 \texttt{yt update --all} \textemdash\ Update yt and dependencies to most recent version. \\
 \texttt{yt version} \textemdash\ yt installation information. \\
 \texttt{yt notebook} \textemdash\ Run the IPython notebook server. \\
-\texttt{yt upload\_image} {\it image.png} \textemdash\ Upload PNG image to imgur.com. \\
-\texttt{yt upload\_notebook} {\it notebook.nb} \textemdash\ Upload IPython notebook to hub.yt-project.org.\\
-\texttt{yt plot} {\it dataset} \textemdash\ Create a set of images.\\
-\texttt{yt render} {\it dataset} \textemdash\ Create a simple
+\texttt{yt upload\_image} \textit{image.png} \textemdash\ Upload PNG image to imgur.com. \\
+\texttt{yt upload\_notebook} \textit{notebook.nb} \textemdash\ Upload IPython notebook to hub.yt-project.org.\\
+\texttt{yt plot} \textit{dataset} \textemdash\ Create a set of images.\\
+\texttt{yt render} \textit{dataset} \textemdash\ Create a simple
  volume rendering. \\
-\texttt{yt mapserver} {\it dataset} \textemdash\ View a plot/projection in a Gmaps-like
+\texttt{yt mapserver} \textit{dataset} \textemdash\ View a plot/projection in a Gmaps-like
  interface. \\
-\texttt{yt pastebin} {\it text.out} \textemdash\ Post text to the pastebin at
- paste.yt-project.org. \\ 
-\texttt{yt pastebin\_grab} {\it identifier} \textemdash\ Print content of pastebin to
+\texttt{yt pastebin} \textit{text.out} \textemdash\ Post text to the pastebin at
+ paste.yt-project.org. \\
+\texttt{yt pastebin\_grab} \textit{identifier} \textemdash\ Print content of pastebin to
  STDOUT. \\
 \texttt{yt bugreport} \textemdash\ Report a yt bug. \\
-\texttt{yt hop} {\it dataset} \textemdash\  Run hop on a dataset. \\
+\texttt{yt hop} \textit{dataset} \textemdash\  Run hop on a dataset. \\
 
 \subsection{yt Imports}
 In order to use yt, Python must load the relevant yt modules into memory.
@@ -144,15 +144,15 @@
 used as part of a script.
 \newlength{\MyLen}
 \settowidth{\MyLen}{\texttt{letterpaper}/\texttt{a4paper} \ }
-\texttt{import yt}  \textemdash\ 
+\texttt{import yt}  \textemdash\
 Load yt. \\
-\texttt{from yt.config import ytcfg}  \textemdash\ 
+\texttt{from yt.config import ytcfg}  \textemdash\
 Used to set yt configuration options.
 If used, must be called before importing any other module.\\
-\texttt{from yt.analysis\_modules.\emph{halo\_finding}.api import \textasteriskcentered}  \textemdash\ 
+\texttt{from yt.analysis\_modules.\emph{halo\_finding}.api import \textasteriskcentered}  \textemdash\
 Load halo finding modules. Other modules
-are loaded in a similar way by swapping the 
-{\em emphasized} text.
+are loaded in a similar way by swapping the
+\emph{emphasized} text.
 See the \textbf{Analysis Modules} section for a listing and short descriptions of each.
 
 \subsection{YTArray}
@@ -163,32 +163,32 @@
 very brief list of some useful ones.
 \settowidth{\MyLen}{\texttt{multicol} }\\
 \texttt{v = a.in\_cgs()} \textemdash\ Return the array in CGS units \\
-\texttt{v = a.in\_units('Msun/pc**3')} \textemdash\ Return the array in solar masses per cubic parsec \\ 
+\texttt{v = a.in\_units('Msun/pc**3')} \textemdash\ Return the array in solar masses per cubic parsec \\
 \texttt{v = a.max(), a.min()} \textemdash\ Return maximum, minimum of \texttt{a}. \\
 \texttt{index = a.argmax(), a.argmin()} \textemdash\ Return index of max,
 min value of \texttt{a}.\\
-\texttt{v = a[}{\it index}\texttt{]} \textemdash\ Select a single value from \texttt{a} at location {\it index}.\\
-\texttt{b = a[}{\it i:j}\texttt{]} \textemdash\ Select the slice of values from
+\texttt{v = a[}\textit{index}\texttt{]} \textemdash\ Select a single value from \texttt{a} at location \textit{index}.\\
+\texttt{b = a[}\textit{i:j}\texttt{]} \textemdash\ Select the slice of values from
 \texttt{a} between
-locations {\it i} to {\it j-1} saved to a new Numpy array \texttt{b} with length {\it j-i}. \\
+locations \textit{i} to \textit{j-1} saved to a new Numpy array \texttt{b} with length \textit{j-i}. \\
 \texttt{sel = (a > const)} \textemdash\ Create a new boolean Numpy array
 \texttt{sel}, of the same shape as \texttt{a},
 that marks which values of \texttt{a > const}. Other operators (e.g. \textless, !=, \%) work as well.\\
 \texttt{b = a[sel]} \textemdash\ Create a new Numpy array \texttt{b} made up of
 elements from \texttt{a} that correspond to elements of \texttt{sel}
-that are {\it True}. In the above example \texttt{b} would be all elements of \texttt{a} that are greater than \texttt{const}.\\
-\texttt{a.write\_hdf5({\it filename.h5})} \textemdash\ Save \texttt{a} to the hdf5 file {\it filename.h5}.\\
+that are \textit{True}. In the above example \texttt{b} would be all elements of \texttt{a} that are greater than \texttt{const}.\\
+\texttt{a.write\_hdf5(\textit{filename.h5})} \textemdash\ Save \texttt{a} to the hdf5 file \textit{filename.h5}.\\
 
 \subsection{IPython Tips}
 \settowidth{\MyLen}{\texttt{multicol} }
 These tips work if IPython has been loaded, typically either by invoking
 \texttt{iyt} or \texttt{yt load} on the command line, or using the IPython notebook (\texttt{yt notebook}).
 \texttt{Tab complete} \textemdash\ IPython will attempt to auto-complete a
-variable or function name when the \texttt{Tab} key is pressed, e.g. {\it HaloFi}\textendash\texttt{Tab} would auto-complete
-to {\it HaloFinder}. This also works with imports, e.g. {\it from numpy.random.}\textendash\texttt{Tab}
+variable or function name when the \texttt{Tab} key is pressed, e.g. \textit{HaloFi}\textendash\texttt{Tab} would auto-complete
+to \textit{HaloFinder}. This also works with imports, e.g. \textit{from numpy.random.}\textendash\texttt{Tab}
 would give you a list of random functions (note the trailing period before hitting \texttt{Tab}).\\
 \texttt{?, ??} \textemdash\ Appending one or two question marks at the end of any object gives you
-detailed information about it, e.g. {\it variable\_name}?.\\
+detailed information about it, e.g. \textit{variable\_name}?.\\
 Below a few IPython ``magics'' are listed, which are IPython-specific shortcut commands.\\
 \texttt{\%paste} \textemdash\ Paste content from the system clipboard into the IPython shell.\\
 \texttt{\%hist} \textemdash\ Print recent command history.\\
@@ -204,40 +204,40 @@
 
 \subsection{Load and Access Data}
 The first step in using yt is to reference a simulation snapshot.
-After that, simulation data is generally accessed in yt using {\it Data Containers} which are Python objects
+After that, simulation data is generally accessed in yt using \textit{Data Containers} which are Python objects
 that define a region of simulation space from which data should be selected.
 \settowidth{\MyLen}{\texttt{multicol} }
-\texttt{ds = yt.load(}{\it dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
+\texttt{ds = yt.load(}\textit{dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
 \texttt{dd = ds.all\_data()} \textemdash\ Select the entire volume.\\
-\texttt{a = dd[}{\it field\_name}\texttt{]} \textemdash\ Copies the contents of {\it field} into the
+\texttt{a = dd[}\textit{field\_name}\texttt{]} \textemdash\ Copies the contents of \textit{field} into the
 YTArray \texttt{a}. Similarly for other data containers.\\
 \texttt{ds.field\_list} \textemdash\ A list of available fields in the snapshot. \\
 \texttt{ds.derived\_field\_list} \textemdash\ A list of available derived fields
 in the snapshot. \\
 \texttt{val, loc = ds.find\_max("Density")} \textemdash\ Find the \texttt{val}ue of
 the maximum of the field \texttt{Density} and its \texttt{loc}ation. \\
-\texttt{sp = ds.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\   Create a spherical data 
-container. {\it cen} may be a coordinate, or ``max'' which 
-centers on the max density point. {\it radius} may be a float in 
-code units or a tuple of ({\it length, unit}).\\
+\texttt{sp = ds.sphere(}\textit{cen}\texttt{,}\textit{radius}\texttt{)} \textemdash\   Create a spherical data
+container. \textit{cen} may be a coordinate, or ``max'' which
+centers on the max density point. \textit{radius} may be a float in
+code units or a tuple of (\textit{length, unit}).\\
 
-\texttt{re = ds.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
-rectilinear data container. {\it cen} is required but not used.
-{\it left} and {\it right edge} are coordinate values that define the region.
+\texttt{re = ds.region(\textit{cen}, \textit{left edge}, \textit{right edge})} \textemdash\ Create a
+rectilinear data container. \textit{cen} is required but not used.
+\textit{left} and \textit{right edge} are coordinate values that define the region.
 
-\texttt{di = ds.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\ 
-Create a cylindrical data container centered at {\it cen} along the 
-direction set by {\it normal},with total length
- 2$\times${\it height} and with radius {\it radius}. \\
- 
-\texttt{ds.save\_object(sp, {\it ``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
-\texttt{sp = ds.load\_object({\it ``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
+\texttt{di = ds.disk(\textit{cen}, \textit{normal}, \textit{radius}, \textit{height})} \textemdash\
+Create a cylindrical data container centered at \textit{cen} along the
+direction set by \textit{normal}, with total length
+ 2$\times$\textit{height} and with radius \textit{radius}. \\
+
+\texttt{ds.save\_object(sp, \textit{``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
+\texttt{sp = ds.load\_object(\textit{``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
 
 
 \subsection{Defining New Fields}
-\texttt{yt} expects on-disk fields, fields generated on-demand and in-memory. 
+\texttt{yt} expects on-disk fields, fields generated on-demand and in-memory.
 Fields can either be created before a dataset is loaded using \texttt{add\_field}:
-\texttt{def \_metal\_mass({\it field},{\it data})}\\
+\texttt{def \_metal\_mass(\textit{field},\textit{data})}\\
 \texttt{\hspace{4 mm} return data["metallicity"]*data["cell\_mass"]}\\
 \texttt{add\_field("metal\_mass", units='g', function=\_metal\_mass)}\\
 Or added to an existing dataset using \texttt{ds.add\_field}:
@@ -245,34 +245,34 @@
 
 \subsection{Slices and Projections}
 \settowidth{\MyLen}{\texttt{multicol} }
-\texttt{slc = yt.SlicePlot(ds, {\it axis or normal vector}, {\it field}, {\it center=}, {\it width=}, {\it weight\_field=}, {\it additional parameters})} \textemdash\ Make a slice plot
-perpendicular to {\it axis} (specified via 'x', 'y', or 'z') or a normal vector for an off-axis slice of {\it field} weighted by {\it weight\_field} at (code-units) {\it center} with 
-{\it width} in code units or a (value, unit) tuple. Hint: try {\it yt.SlicePlot?} in IPython to see additional parameters.\\
-\texttt{slc.save({\it file\_prefix})} \textemdash\ Save the slice to a png with name prefix {\it file\_prefix}.
+\texttt{slc = yt.SlicePlot(ds, \textit{axis or normal vector}, \textit{field}, \textit{center=}, \textit{width=}, \textit{weight\_field=}, \textit{additional parameters})} \textemdash\ Make a slice plot
+perpendicular to \textit{axis} (specified via 'x', 'y', or 'z') or a normal vector for an off-axis slice of \textit{field} weighted by \textit{weight\_field} at (code-units) \textit{center} with
+\textit{width} in code units or a (value, unit) tuple. Hint: try \textit{yt.SlicePlot?} in IPython to see additional parameters.\\
+\texttt{slc.save(\textit{file\_prefix})} \textemdash\ Save the slice to a png with name prefix \textit{file\_prefix}.
 \texttt{.save()} works similarly for the commands below.\\
 
-\texttt{prj = yt.ProjectionPlot(ds, {\it axis}, {\it field}, {\it addit. params})} \textemdash\ Make a projection. \\
-\texttt{prj = yt.OffAxisProjectionPlot(ds, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
+\texttt{prj = yt.ProjectionPlot(ds, \textit{axis}, \textit{field}, \textit{addit. params})} \textemdash\ Make a projection. \\
+\texttt{prj = yt.OffAxisProjectionPlot(ds, \textit{normal}, \textit{fields}, \textit{center=}, \textit{width=}, \textit{depth=},\textit{north\_vector=},\textit{weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
 
 \subsection{Plot Annotations}
 \settowidth{\MyLen}{\texttt{multicol} }
-Plot callbacks are functions itemized in a registry that is attached to every plot object. They can be accessed and then called like \texttt{ prj.annotate\_velocity(factor=16, normalize=False)}. Most callbacks also accept a {\it plot\_args} dict that is fed to matplotlib annotator. \\
-\texttt{velocity({\it factor=},{\it scale=},{\it scale\_units=}, {\it normalize=})} \textemdash\ Uses field "x-velocity" to draw quivers\\
-\texttt{magnetic\_field({\it factor=},{\it scale=},{\it scale\_units=}, {\it normalize=})} \textemdash\ Uses field "Bx" to draw quivers\\
-\texttt{quiver({\it field\_x},{\it field\_y},{\it factor=},{\it scale=},{\it scale\_units=}, {\it normalize=})} \\
-\texttt{contour({\it field=},{\it ncont=},{\it factor=},{\it clim=},{\it take\_log=}, {\it additional parameters})} \textemdash Plots a number of contours {\it ncont} to interpolate {\it field} optionally using {\it take\_log}, upper and lower {\it c}ontour{\it lim}its and {\it factor} number of points in the interpolation.\\
-\texttt{grids({\it alpha=}, {\it draw\_ids=}, {\it periodic=}, {\it min\_level=}, {\it max\_level=})} \textemdash Add grid boundaries. \\
-\texttt{streamlines({\it field\_x},{\it field\_y},{\it factor=},{\it density=})}\\
-\texttt{clumps({\it clumplist})} \textemdash\ Generate {\it clumplist} using the clump finder and plot. \\
-\texttt{arrow({\it pos}, {\it code\_size})} Add an arrow at a {\it pos}ition. \\
-\texttt{point({\it pos}, {\it text})} \textemdash\ Add text at a {\it pos}ition. \\
-\texttt{marker({\it pos}, {\it marker=})} \textemdash\ Add a matplotlib-defined marker at a {\it pos}ition. \\
-\texttt{sphere({\it center}, {\it radius}, {\it text=})} \textemdash\ Draw a circle and append {\it text}.\\
-\texttt{hop\_circles({\it hop\_output}, {\it max\_number=}, {\it annotate=}, {\it min\_size=}, {\it max\_size=}, {\it font\_size=}, {\it print\_halo\_size=}, {\it fixed\_radius=}, {\it min\_mass=}, {\it print\_halo\_mass=}, {\it width=})} \textemdash\ Draw a halo, printing it's ID, mass, clipping halos depending on number of particles ({\it size}) and optionally fixing the drawn circle radius to be constant for all halos.\\
-\texttt{hop\_particles({\it hop\_output},{\it max\_number=},{\it p\_size=},\\
-{\it min\_size},{\it alpha=})} \textemdash\ Draw particle positions for member halos with a certain number of pixels per particle.\\
-\texttt{particles({\it width},{\it p\_size=},{\it col=}, {\it marker=}, {\it stride=}, {\it ptype=}, {\it stars\_only=}, {\it dm\_only=}, {\it minimum\_mass=}, {\it alpha=})}  \textemdash\  Draw particles of {\it p\_size} pixels in a slab of {\it width} with {\it col}or using a matplotlib {\it marker} plotting only every {\it stride} number of particles.\\
-\texttt{title({\it text})}\\
+Plot callbacks are functions itemized in a registry that is attached to every plot object. They can be accessed and then called like \texttt{ prj.annotate\_velocity(factor=16, normalize=False)}. Most callbacks also accept a \textit{plot\_args} dict that is fed to matplotlib annotator. \\
+\texttt{velocity(\textit{factor=},\textit{scale=},\textit{scale\_units=}, \textit{normalize=})} \textemdash\ Uses field "x-velocity" to draw quivers\\
+\texttt{magnetic\_field(\textit{factor=},\textit{scale=},\textit{scale\_units=}, \textit{normalize=})} \textemdash\ Uses field "Bx" to draw quivers\\
+\texttt{quiver(\textit{field\_x},\textit{field\_y},\textit{factor=},\textit{scale=},\textit{scale\_units=}, \textit{normalize=})} \\
+\texttt{contour(\textit{field=},\textit{ncont=},\textit{factor=},\textit{clim=},\textit{take\_log=}, \textit{additional parameters})} \textemdash Plots a number of contours \textit{ncont} to interpolate \textit{field} optionally using \textit{take\_log}, upper and lower \textit{c}ontour\textit{lim}its and \textit{factor} number of points in the interpolation.\\
+\texttt{grids(\textit{alpha=}, \textit{draw\_ids=}, \textit{periodic=}, \textit{min\_level=}, \textit{max\_level=})} \textemdash Add grid boundaries. \\
+\texttt{streamlines(\textit{field\_x},\textit{field\_y},\textit{factor=},\textit{density=})}\\
+\texttt{clumps(\textit{clumplist})} \textemdash\ Generate \textit{clumplist} using the clump finder and plot. \\
+\texttt{arrow(\textit{pos}, \textit{code\_size})} Add an arrow at a \textit{pos}ition. \\
+\texttt{point(\textit{pos}, \textit{text})} \textemdash\ Add text at a \textit{pos}ition. \\
+\texttt{marker(\textit{pos}, \textit{marker=})} \textemdash\ Add a matplotlib-defined marker at a \textit{pos}ition. \\
+\texttt{sphere(\textit{center}, \textit{radius}, \textit{text=})} \textemdash\ Draw a circle and append \textit{text}.\\
+\texttt{hop\_circles(\textit{hop\_output}, \textit{max\_number=}, \textit{annotate=}, \textit{min\_size=}, \textit{max\_size=}, \textit{font\_size=}, \textit{print\_halo\_size=}, \textit{fixed\_radius=}, \textit{min\_mass=}, \textit{print\_halo\_mass=}, \textit{width=})} \textemdash\ Draw a halo, printing its ID, mass, clipping halos depending on number of particles (\textit{size}) and optionally fixing the drawn circle radius to be constant for all halos.\\
+\texttt{hop\_particles(\textit{hop\_output},\textit{max\_number=},\textit{p\_size=},\\
+\textit{min\_size},\textit{alpha=})} \textemdash\ Draw particle positions for member halos with a certain number of pixels per particle.\\
+\texttt{particles(\textit{width},\textit{p\_size=},\textit{col=}, \textit{marker=}, \textit{stride=}, \textit{ptype=}, \textit{stars\_only=}, \textit{dm\_only=}, \textit{minimum\_mass=}, \textit{alpha=})}  \textemdash\  Draw particles of \textit{p\_size} pixels in a slab of \textit{width} with \textit{col}or using a matplotlib \textit{marker} plotting only every \textit{stride} number of particles.\\
+\texttt{title(\textit{text})}\\
 
 \subsection{The $\sim$/.yt/ Directory}
 \settowidth{\MyLen}{\texttt{multicol} }
@@ -297,12 +297,12 @@
 
 
 \subsection{Parallel Analysis}
-\settowidth{\MyLen}{\texttt{multicol}} 
+\settowidth{\MyLen}{\texttt{multicol}}
 Nearly all of yt is parallelized using
-MPI.  The {\it mpi4py} package must be installed for parallelism in yt.  To
-install {\it pip install mpi4py} on the command line usually works.
+MPI\@.  The \textit{mpi4py} package must be installed for parallelism in yt.  To
+install it, \textit{pip install mpi4py} on the command line usually works.
 Execute python in parallel similar to this:\\
-{\it mpirun -n 12 python script.py}\\
+\textit{mpirun -n 12 python script.py}\\
 The file \texttt{script.py} must call the \texttt{yt.enable\_parallelism()} to
 turn on yt's parallelism.  If this doesn't happen, all cores will execute the
 same serial yt script.  This command may differ for each system on which you use
@@ -320,12 +320,12 @@
 \texttt{hg clone https://bitbucket.org/yt\_analysis/yt} \textemdash\ Clone a copy of yt. \\
 \texttt{hg status} \textemdash\ Files changed in working directory.\\
 \texttt{hg diff} \textemdash\ Print diff of all changed files in working directory. \\
-\texttt{hg diff -r{\it RevX} -r{\it RevY}} \textemdash\ Print diff of all changes between revision {\it RevX} and {\it RevY}.\\
+\texttt{hg diff -r\textit{RevX} -r\textit{RevY}} \textemdash\ Print diff of all changes between revision \textit{RevX} and \textit{RevY}.\\
 \texttt{hg log} \textemdash\ History of changes.\\
-\texttt{hg cat -r{\it RevX file}} \textemdash\ Print the contents of {\it file} from revision {\it RevX}.\\
+\texttt{hg cat -r\textit{RevX file}} \textemdash\ Print the contents of \textit{file} from revision \textit{RevX}.\\
 \texttt{hg heads} \textemdash\ Print all the current heads. \\
-\texttt{hg revert -r{\it RevX file}} \textemdash\ Revert {\it file} to revision {\it RevX}. On-disk changed version is
-moved to {\it file.orig}. \\
+\texttt{hg revert -r\textit{RevX file}} \textemdash\ Revert \textit{file} to revision \textit{RevX}. On-disk changed version is
+moved to \textit{file.orig}. \\
 \texttt{hg commit} \textemdash\ Commit changes to repository. \\
 \texttt{hg push} \textemdash\ Push changes to default remote repository. \\
 \texttt{hg pull} \textemdash\ Pull changes from default remote repository. \\

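The cheatsheet's derived-field entry compresses a full workflow into three lines of LaTeX; expanded into a runnable script it looks roughly like this (the dataset name is a placeholder):

    import yt

    # Derived field built from two on-disk fields, as in the cheatsheet.
    def _metal_mass(field, data):
        return data["metallicity"] * data["cell_mass"]

    yt.add_field("metal_mass", units="g", function=_metal_mass)

    ds = yt.load("mydata")         # placeholder dataset
    dd = ds.all_data()             # select the entire volume
    print(dd["metal_mass"].sum())  # total metal mass in the region
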
diff -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 -r a4624d509d5e9736b8153b22dc40c69eb2cdb792 doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -17,7 +17,7 @@
 #
 # By default this will install yt from source.
 #
-# If you experience problems, please visit the Help section at 
+# If you experience problems, please visit the Help section at
 # http://yt-project.org.
 #
 DEST_SUFFIX="yt-conda"
@@ -298,7 +298,7 @@
 
 if [ $INST_UNSTRUCTURED -eq 1 ]
 then
-  YT_DEPS+=('netcdf4')   
+  YT_DEPS+=('netcdf4')
 fi
 
 # Here is our dependency list for yt
@@ -361,7 +361,7 @@
 echo "yt and the Conda system are now installed in $DEST_DIR ."
 echo
 echo "You must now modify your PATH variable by prepending:"
-echo 
+echo
 echo "   $DEST_DIR/bin"
 echo
 echo "On Bash-style shells you can copy/paste the following command to "

diff -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 -r a4624d509d5e9736b8153b22dc40c69eb2cdb792 doc/helper_scripts/code_support.py
--- a/doc/helper_scripts/code_support.py
+++ b/doc/helper_scripts/code_support.py
@@ -85,7 +85,7 @@
 print("|| . ||", end=' ')
 for c in code_names:
     print("%s || " % (c), end=' ')
-print() 
+print()
 
 for vn in vals:
     print("|| !%s ||" % (vn), end=' ')

diff -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 -r a4624d509d5e9736b8153b22dc40c69eb2cdb792 doc/helper_scripts/run_recipes.py
--- a/doc/helper_scripts/run_recipes.py
+++ b/doc/helper_scripts/run_recipes.py
@@ -19,7 +19,7 @@
 CWD = os.getcwd()
 ytcfg["yt", "serialize"] = "False"
 PARALLEL_TEST = {"rockstar_nest": "3"}
-BLACKLIST = []
+BLACKLIST = ["opengl_ipython", "opengl_vr"]
 
 
 def prep_dirs():

diff -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 -r a4624d509d5e9736b8153b22dc40c69eb2cdb792 doc/helper_scripts/table.py
--- a/doc/helper_scripts/table.py
+++ b/doc/helper_scripts/table.py
@@ -44,7 +44,7 @@
       "A bunch of illustrated examples of how to do things"),
      ("reference/index.html", "Reference Materials",
       "A list of all bundled fields, API documentation, the Change Log..."),
-     ("faq/index.html", "FAQ", 
+     ("faq/index.html", "FAQ",
       "Frequently Asked Questions: answered for you!")
   ]),
 ]

diff -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 -r a4624d509d5e9736b8153b22dc40c69eb2cdb792 doc/helper_scripts/update_recipes.py
--- a/doc/helper_scripts/update_recipes.py
+++ b/doc/helper_scripts/update_recipes.py
@@ -66,7 +66,7 @@
             written = cond_output(output, written)
             ofn = "%s/%s_%s" % (ndir, fn, os.path.basename(ifn))
             open(ofn, "wb").write(open(ifn, "rb").read())
-            output.write(".. image:: _%s/%s_%s\n" % (fn, fn, os.path.basename(ifn)) + 
+            output.write(".. image:: _%s/%s_%s\n" % (fn, fn, os.path.basename(ifn)) +
                          "   :width: 240\n" +
                          "   :target: ../_images/%s_%s\n" % (fn, os.path.basename(ifn))
                         )

diff -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 -r a4624d509d5e9736b8153b22dc40c69eb2cdb792 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1,13 +1,13 @@
 #
 # Hi there!  Welcome to the yt installation script.
 #
-# First things first, if you experience problems, please visit the Help 
+# First things first, if you experience problems, please visit the Help
 # section at http://yt-project.org.
 #
 # This script is designed to create a fully isolated Python installation
 # with the dependencies you need to run yt.
 #
-# There are a few options, but you only need to set *one* of them, which is 
+# There are a few options, but you only need to set *one* of them, which is
 # the next one, DEST_DIR:
 
 DEST_SUFFIX="yt-`uname -m`"
@@ -307,7 +307,7 @@
         echo "  * gcc-{,c++,gfortran}"
         echo "  * make"
         echo "  * patch"
-        echo 
+        echo
         echo "You can accomplish this by executing:"
         echo "$ sudo yum install gcc gcc-c++ gcc-gfortran make patch zip"
         echo "$ sudo yum install ncurses-devel uuid-devel openssl-devel readline-devel"
@@ -495,7 +495,7 @@
 if [ $INST_PY3 -eq 1 ]
 then
      PYTHON_EXEC='python3.4'
-else 
+else
      PYTHON_EXEC='python2.7'
 fi
 
@@ -513,7 +513,7 @@
     [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
     touch $LIB/extracted
     BUILD_ARGS=""
-    if [[ $LIB =~ .*mercurial.* ]] 
+    if [[ $LIB =~ .*mercurial.* ]]
     then
         PYEXE="python2.7"
     else
@@ -620,9 +620,9 @@
 CYTHON='Cython-0.22'
 PYX='PyX-0.12.1'
 BZLIB='bzip2-1.0.6'
-FREETYPE_VER='freetype-2.4.12' 
+FREETYPE_VER='freetype-2.4.12'
 H5PY='h5py-2.5.0'
-HDF5='hdf5-1.8.14' 
+HDF5='hdf5-1.8.14'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
 MATPLOTLIB='matplotlib-1.4.3'
@@ -880,7 +880,7 @@
 
 # This fixes problems with gfortran linking.
 unset LDFLAGS
- 
+
 echo "Installing pip"
 ( ${GETFILE} https://bootstrap.pypa.io/get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
 ( ${DEST_DIR}/bin/${PYTHON_EXEC} get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -1006,7 +1006,7 @@
 cd $MY_PWD
 
 if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import readline" 2>&1 )>> ${LOG_FILE}) || \
-    [[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]] 
+    [[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]]
 then
     if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
     then

diff -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 -r a4624d509d5e9736b8153b22dc40c69eb2cdb792 doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -40,7 +40,7 @@
         padding-bottom: 10px;
     }
     /* since 3.1.0 */
-    .navbar-collapse.collapse.in { 
+    .navbar-collapse.collapse.in {
         display: block!important;
     }
     .collapsing {
@@ -48,7 +48,7 @@
     }
 }
 
-/* 
+/*
 
 Sphinx code literals conflict with the notebook code tag, so we special-case
 literals that are inside text.
@@ -56,7 +56,7 @@
 */
 
 p code {
-    color:  #d14;    
+    color:  #d14;
     white-space: nowrap;
     font-size: 90%;
     background-color: #f9f2f4;
@@ -93,16 +93,16 @@
 */
 
 *[id]:before :not(p) {
-  display: block; 
-  content: " "; 
-  margin-top: -45px; 
-  height: 45px; 
-  visibility: hidden; 
+  display: block;
+  content: " ";
+  margin-top: -45px;
+  height: 45px;
+  visibility: hidden;
 }
 
 /*
 
-Make tables span only half the page. 
+Make tables span only half the page.
 
 */
 

diff -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 -r a4624d509d5e9736b8153b22dc40c69eb2cdb792 doc/source/about/index.rst
--- a/doc/source/about/index.rst
+++ b/doc/source/about/index.rst
@@ -12,10 +12,10 @@
 -----------
 
 yt is a toolkit for analyzing and visualizing quantitative data.  Originally
-written to analyze 3D grid-based astrophysical simulation data, 
+written to analyze 3D grid-based astrophysical simulation data,
 it has grown to handle any kind of data represented in a 2D or 3D volume.
-yt is an Python-based open source project and is open for anyone to use or 
-contribute code.  The entire source code and history is available to all 
+yt is a Python-based open source project and is open for anyone to use or
+contribute code.  The entire source code and history are available to all
 at https://bitbucket.org/yt_analysis/yt .
 
 .. _who-is-yt:
@@ -23,16 +23,16 @@
 Who is yt?
 ----------
 
-As an open-source project, yt has a large number of user-developers.  
-In September of 2014, the yt developer community collectively decided to endow 
-the title of *member* on individuals who had contributed in a significant way 
-to the project.  For a list of those members and a description of their 
-contributions to the code, see 
+As an open-source project, yt has a large number of user-developers.
+In September of 2014, the yt developer community collectively decided to endow
+the title of *member* on individuals who had contributed in a significant way
+to the project.  For a list of those members and a description of their
+contributions to the code, see
 `our members website. <http://yt-project.org/members.html>`_
 
-For an up-to-date list of everyone who has contributed to the yt codebase, 
-see the current `CREDITS <http://bitbucket.org/yt_analysis/yt/src/yt/CREDITS>`_ file.  
-For a more detailed breakup of contributions made by individual users, see out 
+For an up-to-date list of everyone who has contributed to the yt codebase,
+see the current `CREDITS <http://bitbucket.org/yt_analysis/yt/src/yt/CREDITS>`_ file.
+For a more detailed breakdown of contributions made by individual users, see our
 `Open HUB page <https://www.openhub.net/p/yt_amr/contributors?query=&sort=commits>`_.
 
 History of yt
@@ -40,17 +40,17 @@
 
 yt was originally begun by Matthew Turk in 2007 in the course of his graduate
 studies in computational astrophysics.  The code was developed
-as a simple data-reader and exporter for grid-based hydrodynamical simulation 
-data outputs from the *Enzo* code.  Over the next few years, he invited 
+as a simple data-reader and exporter for grid-based hydrodynamical simulation
+data outputs from the *Enzo* code.  Over the next few years, he invited
 collaborators and friends to contribute and use yt.  As the community grew,
-so did the capabilities of yt.  It is now a community-developed project with 
-contributions from many people, the hospitality of several institutions, and 
-benefiting from numerous grants.  With this community-driven approach 
-and contributions from a sizeable population of developers, it has evolved 
-into a fully-featured toolkit for analysis and visualization of 
-multidimensional data.  It relies on no proprietary software -- although it 
-can be and has been extended to interface with proprietary software and 
-libraries -- and has been designed from the ground up to enable users to be 
+so did the capabilities of yt.  It is now a community-developed project with
+contributions from many people, the hospitality of several institutions, and
+benefiting from numerous grants.  With this community-driven approach
+and contributions from a sizeable population of developers, it has evolved
+into a fully-featured toolkit for analysis and visualization of
+multidimensional data.  It relies on no proprietary software -- although it
+can be and has been extended to interface with proprietary software and
+libraries -- and has been designed from the ground up to enable users to be
 as immersed in the data as they desire.
 
 How do I contact yt?
@@ -58,7 +58,7 @@
 
 If you have any questions about the code, please contact the `yt users email
 list <http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org>`_.  If
-you're having other problems, please follow the steps in 
+you're having other problems, please follow the steps in
 :ref:`asking-for-help`.
 
 How do I cite yt?
@@ -70,7 +70,7 @@
 entry: ::
 
    @ARTICLE{2011ApJS..192....9T,
-      author = {{Turk}, M.~J. and {Smith}, B.~D. and {Oishi}, J.~S. and {Skory}, S. and 
+      author = {{Turk}, M.~J. and {Smith}, B.~D. and {Oishi}, J.~S. and {Skory}, S. and
    	{Skillman}, S.~W. and {Abel}, T. and {Norman}, M.~L.},
        title = "{yt: A Multi-code Analysis Toolkit for Astrophysical Simulation Data}",
      journal = {\apjs},

diff -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 -r a4624d509d5e9736b8153b22dc40c69eb2cdb792 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -1,76 +1,119 @@
 .. _absorption_spectrum:
 
-Absorption Spectrum
-===================
+Creating Absorption Spectra
+===========================
 
 .. sectionauthor:: Britton Smith <brittonsmith at gmail.com>
 
-Absorption line spectra, such as shown below, can be made with data created 
-by the (:ref:`light-ray-generator`).  For each element of the ray, column 
-densities are calculated multiplying the number density within a grid cell 
-with the path length of the ray through the cell.  Line profiles are 
-generated using a voigt profile based on the temperature field.  The lines 
-are then shifted according to the redshift recorded by the light ray tool 
-and (optionally) the peculiar velocity of gas along the ray.  Inclusion of the 
-peculiar velocity requires setting ``use_peculiar_velocity`` to True in the call to 
-:meth:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`.
+Absorption line spectra are spectra generated using bright background sources
+to illuminate tenuous foreground material and are primarily used in studies
+of the circumgalactic medium and intergalactic medium.  These spectra can
+be created using the
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+and
+:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`
+analysis modules.
 
-The spectrum generator will output a file containing the wavelength and 
-normalized flux.  It will also output a text file listing all important lines.
+The 
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum` class
+and its workhorse method
+:meth:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum.make_spectrum`
+return two arrays, one with wavelengths, the other with the normalized
+flux values at each of the wavelength values.  It can also output a text file
+listing all important lines.
+
+For example, here is an absorption spectrum for the wavelength range from 900 
+to 1800 Angstroms made with a light ray extending from z = 0 to z = 0.4:
 
 .. image:: _images/spectrum_full.png
    :width: 500
 
-An absorption spectrum for the wavelength range from 900 to 1800 Angstroms 
-made with a light ray extending from z = 0 to z = 0.4.
+And a zoom-in on the 1425-1450 Angstrom window:
 
 .. image:: _images/spectrum_zoom.png
    :width: 500
 
-A zoom-in of the above spectrum.
+Method for Creating Absorption Spectra
+--------------------------------------
 
-Creating an Absorption Spectrum
--------------------------------
+Once a
+:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`
+has been created traversing a dataset using the :ref:`light-ray-generator`,
+a series of arrays store the various fields of the gas parcels (represented
+as cells) intersected along the ray.
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+steps through each element of the
+:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`'s
+arrays and calculates the column density for the desired ion by multiplying its
+number density with the path length through the cell.  Using these column
+densities along with temperatures to calculate thermal broadening, Voigt
+profiles are deposited onto a featureless background spectrum.  By default,
+the peculiar velocity of the gas is included as a Doppler redshift in addition
+to any cosmological redshift of the data dump itself.
 
-To instantiate an AbsorptionSpectrum object, the arguments required are the 
-minimum and maximum wavelengths, and the number of wavelength bins.
+Subgrid Deposition
+^^^^^^^^^^^^^^^^^^
+
+For features not resolved (i.e. possessing narrower width than the spectral
+resolution),
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+performs subgrid deposition.  The subgrid deposition algorithm creates a number
+of smaller virtual bins; by default, the width of the virtual bins is 1/10th
+the width of the spectral feature.  The Voigt profile is then deposited
+into these virtual bins where it is resolved, and then these virtual bins
+are numerically integrated back to the resolution of the original spectral bin
+size, yielding accurate equivalent width values.
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+informs the user how many spectral features are deposited in this fashion.
+
+Tutorial on Creating an Absorption Spectrum
+-------------------------------------------
+
+Initializing the ``AbsorptionSpectrum`` Class
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To instantiate an
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+object, the arguments required are the
+minimum and maximum wavelengths (assumed to be in Angstroms), and the number
+of wavelength bins to span this range (including the endpoints):
 
 .. code-block:: python
 
   from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 
-  sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
+  sp = AbsorptionSpectrum(900.0, 1800.0, 10001)
 
 Adding Features to the Spectrum
--------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-Absorption lines and continuum features can then be added to the spectrum.  
-To add a line, you must know some properties of the line: the rest wavelength, 
-f-value, gamma value, and the atomic mass in amu of the atom.  That line must 
+Absorption lines and continuum features can then be added to the spectrum.
+To add a line, you must know some properties of the line: the rest wavelength,
+f-value, gamma value, and the atomic mass in amu of the atom.  That line must
 be tied in some way to a field in the dataset you are loading, and this field
-must be added to the LightRay object when it is created.  Below, we will 
-add the H Lyman-alpha line, which is tied to the neutral hydrogen field 
+must be added to the LightRay object when it is created.  Below, we will
+add the H Lyman-alpha line, which is tied to the neutral hydrogen field
 ('H_number_density').
 
 .. code-block:: python
-  
+
   my_label = 'HI Lya'
   field = 'H_number_density'
   wavelength = 1215.6700 # Angstroms
   f_value = 4.164E-01
   gamma = 6.265e+08
   mass = 1.00794
-  
+
   sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10)
 
-In the above example, the *field* argument tells the spectrum generator which 
-field from the ray data to use to calculate the column density.  The 
-``label_threshold`` keyword tells the spectrum generator to add all lines 
-above a column density of 10 :superscript:`10` cm :superscript:`-2` to the 
-text line list.  If None is provided, as is the default, no lines of this 
-type will be added to the text list.
+In the above example, the *field* argument tells the spectrum generator which
+field from the ray data to use to calculate the column density.  The
+``label_threshold`` keyword tells the spectrum generator to add all lines
+above a column density of 10 :superscript:`10` cm :superscript:`-2` to the
+text line list output at the end.  If None is provided, as is the default,
+no lines of this type will be added to the text list.
 
-Continuum features with optical depths that follow a power law can also be 
+Continuum features with optical depths that follow a power law can also be
 added.  Like adding lines, you must specify details like the wavelength
 and the field in the dataset and LightRay that is tied to this feature.
 Below, we will add H Lyman continuum.
@@ -82,29 +125,29 @@
   wavelength = 912.323660 # Angstroms
   normalization = 1.6e17
   index = 3.0
-  
+
   sp.add_continuum(my_label, field, wavelength, normalization, index)
 
 Making the Spectrum
--------------------
+^^^^^^^^^^^^^^^^^^^
 
-Once all the lines and continuum are added, it is time to make a spectrum out 
+Once all the lines and continuum are added, it is time to make a spectrum out
 of some light ray data.
 
 .. code-block:: python
 
-  wavelength, flux = sp.make_spectrum('lightray.h5', 
-                                      output_file='spectrum.fits', 
-                                      line_list_file='lines.txt',
-                                      use_peculiar_velocity=True)
+  wavelength, flux = sp.make_spectrum('lightray.h5',
+                                      output_file='spectrum.fits',
+                                      line_list_file='lines.txt')
 
-A spectrum will be made using the specified ray data and the wavelength and 
-flux arrays will also be returned.  If ``use_peculiar_velocity`` is set to 
-False, the lines will only be shifted according to the redshift.
+A spectrum will be made using the specified ray data and the wavelength and
+flux arrays will also be returned.  If you set the optional
+``use_peculiar_velocity`` keyword to False, the lines will not incorporate
+Doppler redshifts to shift the deposition of the line features.
 
-Three output file formats are supported for writing out the spectrum: fits, 
-hdf5, and ascii.  The file format used is based on the extension provided 
-in the ``output_file`` keyword: ``.fits`` for a fits file, 
+Three output file formats are supported for writing out the spectrum: fits,
+hdf5, and ascii.  The file format used is based on the extension provided
+in the ``output_file`` keyword: ``.fits`` for a fits file,
 ``.h5`` for an hdf5 file, and anything else for an ascii file.
 
 .. note:: To write out a fits file, you must install the `astropy <http://www.astropy.org>`_ python library in order to access the astropy.io.fits module.  You can usually do this by simply running `pip install astropy` at the command line.
@@ -112,29 +155,30 @@
 Generating Spectra in Parallel
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-The spectrum generator can be run in parallel simply by following the procedures 
-laid out in :ref:`parallel-computation` for running yt scripts in parallel.  
-Spectrum generation is parallelized using a multi-level strategy where each 
-absorption line is deposited by a different processor.  If the number of available 
-processors is greater than the number of lines, then the deposition of 
-individual lines will be divided over multiple processors.
+The ``AbsorptionSpectrum`` analysis module can be run in parallel simply by
+following the procedures laid out in :ref:`parallel-computation` for running
+yt scripts in parallel.  Spectrum generation is parallelized using a multi-level
+strategy where each absorption line is deposited by a different processor.
+If the number of available processors is greater than the number of lines,
+then the deposition of individual lines will be divided over multiple
+processors.
 
-Fitting an Absorption Spectrum
-------------------------------
+Fitting Absorption Spectra
+==========================
 
 .. sectionauthor:: Hilary Egan <hilary.egan at colorado.edu>
 
 This tool can be used to fit absorption spectra, particularly those
 generated using the (``AbsorptionSpectrum``) tool. For more details
 on its uses and implementation please see (`Egan et al. (2013)
-<http://arxiv.org/abs/1307.2244>`_). If you find this tool useful we 
+<http://arxiv.org/abs/1307.2244>`_). If you find this tool useful we
 encourage you to cite accordingly.
 
 Loading an Absorption Spectrum
 ------------------------------
 
-To load an absorption spectrum created by 
-(:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum``), 
+To load an absorption spectrum created by
+(:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`),
 we specify the output file name. It is advisable to use either an .h5
 or .fits file, rather than an ascii file to save the spectrum as rounding
 errors produced in saving to a ascii file will negatively impact fit quality.
@@ -149,7 +193,7 @@
 Specifying Species Properties
 -----------------------------
 
-Before fitting a spectrum, you must specify the properties of all the 
+Before fitting a spectrum, you must specify the properties of all the
 species included when generating the spectrum.
 
 The physical properties needed for each species are the rest wavelength,
@@ -160,7 +204,7 @@
 
 To fine tune the fitting procedure and give results in a minimal
 number of optimizing steps, we specify expected maximum and minimum
+values for the column density, Doppler parameter, and redshift. These
+values for the column density, doppler parameter, and redshift. These
 values can be well outside the range of expected values for a typical line
 and are mostly to prevent the algorithm from fitting to negative values
 or becoming numerically unstable.
@@ -204,7 +248,7 @@
 --------------------------
 
 After loading a spectrum and specifying the properties of the species
-used to generate the spectrum, an appropriate fit can be generated. 
+used to generate the spectrum, an appropriate fit can be generated.
 
 .. code-block:: python
 
@@ -219,19 +263,19 @@
 recommended to fit species that generate multiple lines first, as a fit
 will only be accepted if all of the lines are fit appropriately using
 a single set of parameters. At the moment no cross correlation between
-lines of different species is performed. 
+lines of different species is performed.
 
-The parameters of the lines that are needed to fit the spectrum are contained 
+The parameters of the lines that are needed to fit the spectrum are contained
 in the ``fitted_lines`` variable. Each species given in ``orderFits`` will
-be a key in the ``fitted_lines`` dictionary. The entry for each species 
-key will be another dictionary containing entries for 'N','b','z', and 
+be a key in the ``fitted_lines`` dictionary. The entry for each species
+key will be another dictionary containing entries for 'N','b','z', and
 'group#' which are the column density, doppler parameter, redshift,
-and associate line complex respectively. The i :superscript:`th` line 
-of a given species is then given by the parameters ``N[i]``, ``b[i]``, 
+and associate line complex respectively. The i :superscript:`th` line
+of a given species is then given by the parameters ``N[i]``, ``b[i]``,
 and ``z[i]`` and is part of the same complex (and was fitted at the same time)
 as all lines with the same group number as ``group#[i]``.
 
-The ``fitted_flux`` is an ndarray of the same size as ``flux`` and 
+The ``fitted_flux`` is an ndarray of the same size as ``flux`` and
 ``wavelength`` that contains the cumulative absorption spectrum generated
 by the lines contained in ``fitted_lines``.
 
@@ -250,8 +294,8 @@
 
 .. sectionauthor:: Hilary Egan <hilary.egan at colorado.edu>
 
-To generate a fit for a spectrum 
-:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.generate_total_fit` 
+To generate a fit for a spectrum
+:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.generate_total_fit`
 is called.
 This function controls the identification of line complexes, the fit
 of a series of absorption lines for each appropriate species, checks of
@@ -260,14 +304,14 @@
 Finding Line Complexes
 ----------------------
 
-Line complexes are found using the 
+Line complexes are found using the
 :func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.find_complexes`
-function. The process by which line complexes are found involves walking 
-through the array of flux in order from minimum to maximum wavelength, and 
-finding series of spatially contiguous cells whose flux is less than some 
-limit.  These regions are then checked in terms of an additional flux limit 
-and size.  The bounds of all the passing regions are then listed and returned. 
-Those bounds that cover an exceptionally large region of wavelength space will 
+function. The process by which line complexes are found involves walking
+through the array of flux in order from minimum to maximum wavelength, and
+finding series of spatially contiguous cells whose flux is less than some
+limit.  These regions are then checked in terms of an additional flux limit
+and size.  The bounds of all the passing regions are then listed and returned.
+Those bounds that cover an exceptionally large region of wavelength space will
 be broken up if a suitable cut point is found. This method is only appropriate
 for noiseless spectra.
 
@@ -280,25 +324,25 @@
 unstable when optimizing.
 
 The ``fitLim`` parameter controls what is the maximum flux that the trough
-of the region can have and still be considered a line complex. This 
+of the region can have and still be considered a line complex. This
 effectively controls the sensitivity to very low column absorbers. Default
-value is ``fitLim`` = 0.99. If a region is identified where the flux of the 
+value is ``fitLim`` = 0.99. If a region is identified where the flux of the
 trough is greater than this value, the region is simply ignored.
 
-The ``minLength`` parameter controls the minimum number of array elements 
+The ``minLength`` parameter controls the minimum number of array elements
 that an identified region must have. This value must be greater than or
 equal to 3 as there are a minimum of 3 free parameters that must be fit.
 Default is ``minLength`` = 3.
 
 The ``maxLength`` parameter controls the maximum number of array elements
 that an identified region can have before it is split into separate regions.
-Default is ``maxLength`` = 1000. This should be adjusted based on the 
+Default is ``maxLength`` = 1000. This should be adjusted based on the
 resolution of the spectrum to remain appropriate. The value corresponds
-to a wavelength of roughly 50 angstroms. 
+to a wavelength of roughly 50 angstroms.
 
 The ``splitLim`` parameter controls how exceptionally large regions are split.
 When such a region is identified by having more array elements than
-``maxLength``, the point of maximum flux (or minimum absorption) in the 
+``maxLength``, the point of maximum flux (or minimum absorption) in the
 middle two quartiles is identified. If that point has a flux greater than
 or equal to ``splitLim``, then two separate complexes are created: one from
 the lower wavelength edge to the minimum absorption point and the other from
@@ -309,7 +353,7 @@
 Fitting a Line Complex
 ----------------------
 
-After a complex is identified, it is fitted by iteratively adding and 
+After a complex is identified, it is fitted by iteratively adding and
 optimizing a set of Voigt Profiles for a particular species until the
 region is considered successfully fit. The optimizing is accomplished
 using scipy's least squares optimizer. This requires an initial estimate
@@ -326,36 +370,36 @@
 smaller initial guess is given. These values are chosen to make optimization
 faster and more stable by being closer to the actual value, but the final
 results of fitting should not depend on them as they merely provide a
-starting point. 
+starting point.
 
-After the parameters for a line are optimized for the first time, the 
-optimized parameters are then used for the initial guess on subsequent 
-iterations with more lines. 
+After the parameters for a line are optimized for the first time, the
+optimized parameters are then used for the initial guess on subsequent
+iterations with more lines.
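+
+Schematically, the add-and-optimize cycle can be pictured with the sketch
+below (a toy Gaussian absorber stands in for the Voigt profile and the
+seeding heuristic is invented for illustration; this is not the actual yt
+fitter):
+
+.. code-block:: python
+
+   import numpy as np
+   from scipy.optimize import leastsq
+
+   def absorber(x, amp, center, width):
+       # toy stand-in for a Voigt profile
+       return amp * np.exp(-0.5 * ((x - center) / width) ** 2)
+
+   def fit_complex_sketch(x, flux, errBound):
+       params = np.empty(0)
+       def residual(p):
+           model = np.ones_like(flux)
+           for k in range(0, len(p), 3):
+               model -= absorber(x, *p[k:k + 3])
+           return model - flux
+       while np.sum(residual(params) ** 2) > errBound:
+           if len(params) // 3 >= len(flux) // 3:
+               break  # never more lines than flux elements / 3
+           # seed one more line at the deepest trough, then re-optimize
+           # all lines together starting from the previous best fit
+           guess = np.concatenate([params, [0.5, x[np.argmin(flux)], 1.0]])
+           params, _ = leastsq(residual, guess)
+       return params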
 
-The complex is considered successfully fit when the sum of the squares of 
+The complex is considered successfully fit when the sum of the squares of
 the difference between the flux generated from the fit and the desired flux
 profile is less than ``errBound``. ``errBound`` is related to the optional
-parameter to 
+parameter to
 :meth:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.generate_total_fit`,
-``maxAvgError`` by the number of array elements in the region such that 
+``maxAvgError`` by the number of array elements in the region such that
 ``errBound`` = number of elements * ``maxAvgError``.
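+For example, a region spanning 300 array elements fit with ``maxAvgError`` =
+1.0e-4 is considered successfully fit once the summed squared error drops
+below ``errBound`` = 300 * 1.0e-4 = 0.03.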
 
-There are several other conditions under which the cycle of adding and 
+There are several other conditions under which the cycle of adding and
 optimizing lines will halt. If the error of the optimized fit from adding
 a line is an order of magnitude worse than the error of the fit without
-that line, then it is assumed that the fitting has become unstable and 
+that line, then it is assumed that the fitting has become unstable and
 the latest line is removed. Lines are also prevented from being added if
 the total number of lines is greater than the number of elements in the flux
 array being fit divided by 3. This is because there must not be more free
-parameters in a fit than the number of points to constrain them. 
+parameters in a fit than the number of points to constrain them.
 
 Checking Fit Results
 --------------------
 
 After an acceptable fit for a region is determined, there are several steps
-the algorithm must go through to validate the fits. 
+the algorithm must go through to validate the fits.
 
-First, the parameters must be in a reasonable range. This is a check to make 
+First, the parameters must be in a reasonable range. This is a check to make
 sure that the optimization did not become unstable and generate a fit that
 diverges wildly outside the region where the fit was performed. This way, even
 if a particular complex cannot be fit, the rest of the spectrum fitting still
@@ -363,13 +407,13 @@
 in the species parameter dictionary. These are merely broad limits that will
 prevent numerical instability rather than physical limits.
 
-In cases where a single species generates multiple lines (as in the OVI 
+In cases where a single species generates multiple lines (as in the OVI
 doublet), the fits are then checked for higher wavelength lines. Originally
 the fits are generated only considering the lowest wavelength fit to a region.
 This is because we perform the fitting of complexes in order from the lowest
 wavelength to the highest, so any contribution to a complex being fit must
 come from the lower wavelength as the higher wavelength contributions would
-already have been subtracted out after fitting the lower wavelength. 
+already have been subtracted out after fitting the lower wavelength.
 
 Saturated Lyman Alpha Fitting Tools
 -----------------------------------
@@ -380,8 +424,8 @@
 The basic approach is to simply try a much wider range of initial parameter
 guesses in order to find the true optimization minimum, rather than getting
 stuck in a local minimum. A set of hard coded initial parameter guesses
-for Lyman alpha lines is given by the function 
+for Lyman alpha lines is given by the function
 :func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.get_test_lines`.
 Also included in these parameter guesses is an initial guess of a high
-column cool line overlapping a lower column warm line, indictive of a 
+column cool line overlapping a lower column warm line, indicative of a
 broad Lyman alpha (BLA) absorber.

diff -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 -r a4624d509d5e9736b8153b22dc40c69eb2cdb792 doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -3,17 +3,17 @@
 Clump Finding
 =============
 
-The clump finder uses a contouring algorithm to identified topologically 
-disconnected structures within a dataset.  This works by first creating a 
-single contour over the full range of the contouring field, then continually 
-increasing the lower value of the contour until it reaches the maximum value 
-of the field.  As disconnected structures are identified as separate contours, 
-the routine continues recursively through each object, creating a hierarchy of 
-clumps.  Individual clumps can be kept or removed from the hierarchy based on 
-the result of user-specified functions, such as checking for gravitational 
+The clump finder uses a contouring algorithm to identify topologically
+disconnected structures within a dataset.  This works by first creating a
+single contour over the full range of the contouring field, then continually
+increasing the lower value of the contour until it reaches the maximum value
+of the field.  As disconnected structures are identified as separate contours,
+the routine continues recursively through each object, creating a hierarchy of
+clumps.  Individual clumps can be kept or removed from the hierarchy based on
+the result of user-specified functions, such as checking for gravitational
 boundedness.  A sample recipe can be found in :ref:`cookbook-find_clumps`.
 
-The clump finder requires a data object (see :ref:`data-objects`) and a field 
+The clump finder requires a data object (see :ref:`data-objects`) and a field
 over which the contouring is to be performed.
 
 .. code:: python
@@ -28,11 +28,11 @@
 
    master_clump = Clump(data_source, ("gas", "density"))
 
-At this point, every isolated contour will be considered a clump, 
-whether this is physical or not.  Validator functions can be added to 
-determine if an individual contour should be considered a real clump.  
-These functions are specified with the ``Clump.add_validator`` function.  
-Current, two validators exist: a minimum number of cells and gravitational 
+At this point, every isolated contour will be considered a clump,
+whether this is physical or not.  Validator functions can be added to
+determine if an individual contour should be considered a real clump.
+These functions are specified with the ``Clump.add_validator`` function.
+Currently, two validators exist: a minimum number of cells and gravitational
 boundedness.
 
 .. code:: python
@@ -41,9 +41,9 @@
 
    master_clump.add_validator("gravitationally_bound", use_particles=False)
 
-As many validators as desired can be added, and a clump is only kept if all 
-return True.  If not, a clump is remerged into its parent.  Custom validators 
-can easily be added.  A validator function must only accept a ``Clump`` object 
+As many validators as desired can be added, and a clump is only kept if all
+return True.  If not, a clump is remerged into its parent.  Custom validators
+can easily be added.  A validator function must only accept a ``Clump`` object
 and either return True or False.
 
 .. code:: python
@@ -52,16 +52,16 @@
        return (clump["gas", "cell_mass"].sum() >= min_mass)
    add_validator("minimum_gas_mass", _minimum_gas_mass)
 
-The ``add_validator`` function adds the validator to a registry that can 
-be accessed by the clump finder.  Then, the validator can be added to the 
+The ``add_validator`` function adds the validator to a registry that can
+be accessed by the clump finder.  Then, the validator can be added to the
 clump finding just like the others.
 
 .. code:: python
 
    master_clump.add_validator("minimum_gas_mass", ds.quan(1.0, "Msun"))
 
-The clump finding algorithm accepts the ``Clump`` object, the initial minimum 
-and maximum of the contouring field, and the step size.  The lower value of the 
+The clump finding algorithm accepts the ``Clump`` object, the initial minimum
+and maximum of the contouring field, and the step size.  The lower value of the
 contour finder will be continually multiplied by the step size.
 
 .. code:: python
@@ -71,9 +71,9 @@
    step = 2.0
    find_clumps(master_clump, c_min, c_max, step)
 
-After the clump finding has finished, the master clump will represent the top 
-of a hierarchy of clumps.  The ``children`` attribute within a ``Clump`` object 
-contains a list of all sub-clumps.  Each sub-clump is also a ``Clump`` object 
+After the clump finding has finished, the master clump will represent the top
+of a hierarchy of clumps.  The ``children`` attribute within a ``Clump`` object
+contains a list of all sub-clumps.  Each sub-clump is also a ``Clump`` object
 with its own ``children`` attribute, and so on.
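+
+For example, a minimal sketch of recursively walking the hierarchy using
+only the ``children`` attribute (assuming ``children`` is None for leaf
+clumps):
+
+.. code:: python
+
+   def walk_clumps(clump, level=0):
+       # visit this clump, then recurse into each of its sub-clumps
+       print("%s%s" % ("  " * level, clump))
+       if clump.children is not None:
+           for child in clump.children:
+               walk_clumps(child, level + 1)
+
+   walk_clumps(master_clump)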
 
 A number of helper routines exist for examining the clump hierarchy.
@@ -96,15 +96,15 @@
    print(leaf_clumps[0]["gas", "density"])
    print(leaf_clumps[0].quantities.total_mass())
 
-The writing functions will write out a series or properties about each 
-clump by default.  Additional properties can be appended with the 
+The writing functions will write out a series of properties about each
+clump by default.  Additional properties can be appended with the
 ``Clump.add_info_item`` function.
 
 .. code:: python
 
    master_clump.add_info_item("total_cells")
 
-Just like the validators, custom info items can be added by defining functions 
+Just like the validators, custom info items can be added by defining functions
 that minimally accept a ``Clump`` object and return a string to be printed.
 
 .. code:: python
@@ -121,16 +121,16 @@
 
    master_clump.add_info_item("mass_weighted_jeans_mass")
 
-By default, the following info items are activated: **total_cells**, 
-**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**, 
-**max_grid_level**, **min_number_density**, **max_number_density**, and 
+By default, the following info items are activated: **total_cells**,
+**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**,
+**max_grid_level**, **min_number_density**, **max_number_density**, and
 **distance_to_main_clump**.
 
 Clumps can be visualized using the ``annotate_clumps`` callback.
 
 .. code:: python
 
-   prj = yt.ProjectionPlot(ds, 2, ("gas", "density"), 
+   prj = yt.ProjectionPlot(ds, 2, ("gas", "density"),
                            center='c', width=(20,'kpc'))
    prj.annotate_clumps(leaf_clumps)
    prj.save('clumps')

diff -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 -r a4624d509d5e9736b8153b22dc40c69eb2cdb792 doc/source/analyzing/analysis_modules/cosmology_calculator.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/cosmology_calculator.rst
@@ -0,0 +1,75 @@
+.. _cosmology-calculator:
+
+Cosmology Calculator
+====================
+
+The cosmology calculator can be used to calculate cosmological distances and
+times given a set of cosmological parameters.  A cosmological dataset, `ds`,
+will automatically have an associated cosmology calculator configured with
+the correct parameters, available as `ds.cosmology`.  A standalone
+:class:`~yt.utilities.cosmology.Cosmology` calculator object can be created
+in the following way:
+
+.. code-block:: python
+
+   from yt.utilities.cosmology import Cosmology
+
+   co = Cosmology(hubble_constant=0.7, omega_matter=0.3,
+                  omega_lambda=0.7, omega_curvature=0.0)
+
+Once created, various distance calculations as well as conversions between
+redshift and time are available:
+
+.. notebook-cell::
+
+   from yt.utilities.cosmology import Cosmology
+
+   co = Cosmology(hubble_constant=0.7, omega_matter=0.3,
+                  omega_lambda=0.7, omega_curvature=0.0)
+
+   # Hubble distance (c / h)
+   print("hubble distance", co.hubble_distance())
+
+   # distance from z = 0 to 0.5
+   print("comoving radial distance", co.comoving_radial_distance(0, 0.5).in_units("Mpc/h"))
+
+   # transverse distance
+   print("transverse distance", co.comoving_transverse_distance(0, 0.5).in_units("Mpc/h"))
+
+   # comoving volume
+   print("comoving volume", co.comoving_volume(0, 0.5).in_units("Gpc**3"))
+
+   # angular diameter distance
+   print("angular diameter distance", co.angular_diameter_distance(0, 0.5).in_units("Mpc/h"))
+
+   # angular scale
+   print("angular scale", co.angular_scale(0, 0.5).in_units("Mpc/degree"))
+
+   # luminosity distance
+   print("luminosity distance", co.luminosity_distance(0, 0.5).in_units("Mpc/h"))
+
+   # time between two redshifts
+   print("lookback time", co.lookback_time(0, 0.5).in_units("Gyr"))
+
+   # age of the Universe at a given redshift
+   print("hubble time", co.hubble_time(0).in_units("Gyr"))
+
+   # critical density
+   print("critical density", co.critical_density(0))
+
+   # Hubble parameter at a given redshift
+   print("hubble parameter", co.hubble_parameter(0).in_units("km/s/Mpc"))
+
+   # convert time after Big Bang to redshift
+   my_t = co.quan(8, "Gyr")
+   print("z from t", co.z_from_t(my_t))
+
+   # convert redshift to time after Big Bang (same as Hubble time)
+   print("t from z", co.t_from_z(0.5).in_units("Gyr"))
+
+Note that all distances returned are comoving distances.  All of the above
+functions accept scalar values and arrays.  The helper functions `co.quan`
+and `co.arr` exist to create unitful `YTQuantity` and `YTArray` objects with the
+unit registry of the cosmology calculator.  For more information on the usage
+and meaning of each calculation, consult the reference documentation at
+:ref:`cosmology-calculator-ref`.
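+
+For instance, a short sketch of the helpers (construction as above; whether
+`z_from_t` accepts arrays as well as scalars is an assumption here):
+
+.. code-block:: python
+
+   import numpy as np
+   from yt.utilities.cosmology import Cosmology
+
+   co = Cosmology(hubble_constant=0.7, omega_matter=0.3,
+                  omega_lambda=0.7, omega_curvature=0.0)
+
+   # a unitful array of times carrying the calculator's unit registry
+   my_times = co.arr(np.array([1.0, 5.0, 10.0]), "Gyr")
+   print("z from t", co.z_from_t(my_times))
+
+   # a unitful scalar, as used above
+   my_t = co.quan(8, "Gyr")
+   print("z from t", co.z_from_t(my_t))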

diff -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 -r a4624d509d5e9736b8153b22dc40c69eb2cdb792 doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -91,7 +91,7 @@
 The center of mass would be the same one as returned by the halo
 finder.  The A, B, C are the largest to smallest magnitude of the
 ellipsoid's semi-principal axes. "e0" is the largest semi-principal
-axis vector direction that would have magnitude A but normalized.  
+axis vector direction that would have magnitude A but normalized.
 The "tilt" is an angle measured in radians.  It can be best described
 as after the rotation about the z-axis to align e0 to x in the x-y
 plane, and then rotating about the y-axis to align e0 completely to
@@ -128,7 +128,7 @@
 Since this is a first attempt, there are many drawbacks and corners
 cut.  Many things listed here will be amended when I have time.
 
-* The ellipsoid 3D container like the boolean object, do not contain 
+* The ellipsoid 3D container, like the boolean object, does not contain
   particle position and velocity information.
 * This currently assumes periodic boundary conditions, so if an
   ellipsoid center is at the edge, it will return part of the opposite
@@ -136,7 +136,7 @@
   periodicity in the future.
 * This method gives a minimalistic ellipsoid centered around the
   center of mass that contains all the particles, but sometimes people
-  prefer an inertial tensor triaxial ellipsoid described in 
+  prefer an inertial tensor triaxial ellipsoid described in
   `Dubinski, Carlberg 1991
   <http://adsabs.harvard.edu/abs/1991ApJ...378..496D>`_.  I have that
   method composed but it is not fully tested yet.

diff -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 -r a4624d509d5e9736b8153b22dc40c69eb2cdb792 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -7,21 +7,21 @@
 ----------------------
 
 In yt 3.0, operations relating to the analysis of halos (halo finding,
-merger tree creation, and individual halo analysis) are all brought 
+merger tree creation, and individual halo analysis) are all brought
 together into a single framework. This framework is substantially
-different from the halo analysis machinery available in yt-2.x and is 
-entirely backward incompatible.  
+different from the halo analysis machinery available in yt-2.x and is
+entirely backward incompatible.
 For a direct translation of various halo analysis tasks using yt-2.x
 to yt-3.0 please see :ref:`halo-transition`.
 
-A catalog of halos can be created from any initial dataset given to halo 
+A catalog of halos can be created from any initial dataset given to the halo
 catalog through data_ds. These halos can be found using friends-of-friends,
 HOP, and Rockstar. The finder_method keyword dictates which halo finder to
-use. The available arguments are :ref:`fof`, :ref:`hop`, and :ref:`rockstar`. 
-For more details on the relative differences between these halo finders see 
+use. The available arguments are :ref:`fof`, :ref:`hop`, and :ref:`rockstar`.
+For more details on the relative differences between these halo finders see
 :ref:`halo_finding`.
 
-The class which holds all of the halo information is the 
+The class which holds all of the halo information is the
 :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 
 .. code-block:: python
@@ -32,11 +32,11 @@
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
 
-A halo catalog may also be created from already run rockstar outputs. 
-This method is not implemented for previously run friends-of-friends or 
-HOP finders. Even though rockstar creates one file per processor, 
-specifying any one file allows the full catalog to be loaded. Here we 
-only specify the file output by the processor with ID 0. Note that the 
+A halo catalog may also be created from already-run rockstar outputs.
+This method is not implemented for previously run friends-of-friends or
+HOP finders. Even though rockstar creates one file per processor,
+specifying any one file allows the full catalog to be loaded. Here we
+only specify the file output by the processor with ID 0. Note that the
 argument for supplying a rockstar output is `halos_ds`, not `data_ds`.
 
 .. code-block:: python
@@ -44,10 +44,10 @@
    halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
    hc = HaloCatalog(halos_ds=halos_ds)
 
-Although supplying only the binary output of the rockstar halo finder 
-is sufficient for creating a halo catalog, it is not possible to find 
-any new information about the identified halos. To associate the halos 
-with the dataset from which they were found, supply arguments to both 
+Although supplying only the binary output of the rockstar halo finder
+is sufficient for creating a halo catalog, it is not possible to find
+any new information about the identified halos. To associate the halos
+with the dataset from which they were found, supply arguments to both
 halos_ds and data_ds.
 
 .. code-block:: python
@@ -56,34 +56,35 @@
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
-A data object can also be supplied via the keyword ``data_source``, 
-associated with either dataset, to control the spatial region in 
+A data object can also be supplied via the keyword ``data_source``,
+associated with either dataset, to control the spatial region in
 which halo analysis will be performed.
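+
+For example (a sketch; the sphere center and radius are arbitrary):
+
+.. code-block:: python
+
+   sp = data_ds.sphere("max", (50, "Mpc"))
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop', data_source=sp)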
 
 Analysis Using Halo Catalogs
 ----------------------------
 
-Analysis is done by adding actions to the 
+Analysis is done by adding actions to the
 :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
-Each action is represented by a callback function that will be run on each halo. 
-There are three types of actions:
+Each action is represented by a callback function that will be run on
+each halo.  There are four types of actions:
 
 * Filters
 * Quantities
 * Callbacks
+* Recipes
 
-A list of all available filters, quantities, and callbacks can be found in 
-:ref:`halo_analysis_ref`.  
-All interaction with this analysis can be performed by importing from 
+A list of all available filters, quantities, and callbacks can be found in
+:ref:`halo_analysis_ref`.
+All interaction with this analysis can be performed by importing from
 halo_analysis.
 
 Filters
 ^^^^^^^
 
-A filter is a function that returns True or False. If the return value 
-is True, any further queued analysis will proceed and the halo in 
-question will be added to the final catalog. If the return value False, 
-further analysis will not be performed and the halo will not be included 
+A filter is a function that returns True or False. If the return value
+is True, any further queued analysis will proceed and the halo in
+question will be added to the final catalog. If the return value is False,
+further analysis will not be performed and the halo will not be included
 in the final catalog.
 
 An example of adding a filter:
@@ -92,11 +93,11 @@
 
    hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
 
-Currently quantity_value is the only available filter, but more can be 
-added by the user by defining a function that accepts a halo object as 
-the first argument and then adding it as an available filter. If you 
-think that your filter may be of use to the general community, you can 
-add it to ``yt/analysis_modules/halo_analysis/halo_filters.py`` and issue a 
+Currently, quantity_value is the only available filter, but more can be
+added by the user by defining a function that accepts a halo object as
+the first argument and then adding it as an available filter. If you
+think that your filter may be of use to the general community, you can
+add it to ``yt/analysis_modules/halo_analysis/halo_filters.py`` and issue a
 pull request.
 
 An example of defining your own filter:
@@ -104,11 +105,11 @@
 .. code-block:: python
 
    def my_filter_function(halo):
-       
+
        # Define condition for filter
        filter_value = True
-       
-       # Return a boolean value 
+
+       # Return a boolean value
        return filter_value
 
    # Add your filter to the filter registry
@@ -120,17 +121,17 @@
 Quantities
 ^^^^^^^^^^
 
-A quantity is a call back that returns a value or values. The return values 
-are stored within the halo object in a dictionary called “quantities.” At 
-the end of the analysis, all of these quantities will be written to disk as 
+A quantity is a callback that returns a value or values. The return values
+are stored within the halo object in a dictionary called “quantities.” At
+the end of the analysis, all of these quantities will be written to disk as
 the final form of the generated halo catalog.
 
-Quantities may be available in the initial fields found in the halo catalog, 
-or calculated from a function after supplying a definition. An example 
-definition of center of mass is shown below. Currently available quantities 
-are center_of_mass and bulk_velocity. Their definitions are available in 
-``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that 
-your quantity may be of use to the general community, add it to 
+Quantities may be available in the initial fields found in the halo catalog,
+or calculated from a function after supplying a definition. An example
+definition of center of mass is shown below. Currently available quantities
+are center_of_mass and bulk_velocity. Their definitions are available in
+``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that
+your quantity may be of use to the general community, add it to
 ``halo_quantities.py`` and issue a pull request.  Default halo quantities are:
 
 * ``particle_identifier`` -- Halo ID (e.g. 0 to N)
@@ -153,7 +154,7 @@
    def my_quantity_function(halo):
        # Define quantity to return
        quantity = 5
-       
+
        return quantity
 
    # Add your filter to the filter registry
@@ -161,9 +162,9 @@
 
 
    # ... Later on in your script
-   hc.add_quantity("my_quantity") 
+   hc.add_quantity("my_quantity")
 
-This quantity will then be accessible for functions called later via the 
+This quantity will then be accessible for functions called later via the
 *quantities* dictionary that is associated with the halo object.
 
 .. code-block:: python
@@ -178,23 +179,23 @@
 Callbacks
 ^^^^^^^^^
 
-A callback is actually the super class for quantities and filters and 
-is a general purpose function that does something, anything, to a Halo 
-object. This can include hanging new attributes off the Halo object, 
-performing analysis and writing to disk, etc. A callback does not return 
+A callback is actually the superclass for quantities and filters and
+is a general purpose function that does something, anything, to a Halo
+object. This can include hanging new attributes off the Halo object,
+performing analysis and writing to disk, etc. A callback does not return
 anything.
 
-An example of using a pre-defined callback where we create a sphere for 
+Here is an example of using a pre-defined callback that creates a sphere for
 each halo with a radius that is twice the saved ``radius``.
 
 .. code-block:: python
 
    hc.add_callback("sphere", factor=2.0)
-    
-Currently available callbacks are located in 
-``yt/analysis_modules/halo_analysis/halo_callbacks.py``.  New callbacks may 
-be added by using the syntax shown below. If you think that your 
-callback may be of use to the general community, add it to 
+
+Currently available callbacks are located in
+``yt/analysis_modules/halo_analysis/halo_callbacks.py``.  New callbacks may
+be added by using the syntax shown below. If you think that your
+callback may be of use to the general community, add it to
 halo_callbacks.py and issue a pull request.
 
 An example of defining your own callback:
@@ -213,40 +214,84 @@
    # ...  Later on in your script
    hc.add_callback("my_callback")
 
+Recipes
+^^^^^^^
+
+Recipes allow you to create analysis tasks that consist of a series of
+callbacks, quantities, and filters that are run in succession.  An example
+of this is
+:func:`~yt.analysis_modules.halo_analysis.halo_recipes.calculate_virial_quantities`,
+which calculates virial quantities by first creating a sphere container,
+performing 1D radial profiles, and then interpolating to get values at a
+specified threshold overdensity.  All of these operations are separate
+callbacks, but the recipes allow you to add them to your analysis pipeline
+with one call.  For example,
+
+.. code-block:: python
+
+   hc.add_recipe("calculate_virial_quantities", ["radius", "matter_mass"])
+
+The available recipes are located in
+``yt/analysis_modules/halo_analysis/halo_recipes.py``.  New recipes can be
+created in the following manner:
+
+.. code-block:: python
+
+   def my_recipe(halo_catalog, fields, weight_field=None):
+       # create a sphere
+       halo_catalog.add_callback("sphere")
+       # make profiles
+       halo_catalog.add_callback("profile", ["radius"], fields,
+                                 weight_field=weight_field)
+       # save the profile data
+       halo_catalog.add_callback("save_profiles", output_dir="profiles")
+
+   # add recipe to the registry of recipes
+   add_recipe("profile_and_save", my_recipe)
+
+
+   # ...  Later on in your script
+   hc.add_recipe("profile_and_save", ["density", "temperature"],
+                 weight_field="cell_mass")
+
+Note that, unlike callback, filter, and quantity functions, which take a ``Halo``
+object as the first argument, recipe functions should take a ``HaloCatalog``
+object as the first argument.
+
 Running Analysis
 ----------------
 
-After all callbacks, quantities, and filters have been added, the 
+After all callbacks, quantities, and filters have been added, the
 analysis begins with a call to HaloCatalog.create.
 
 .. code-block:: python
 
    hc.create()
 
-The save_halos keyword determines whether the actual Halo objects 
-are saved after analysis on them has completed or whether just the 
-contents of their quantities dicts will be retained for creating the 
-final catalog. The looping over halos uses a call to parallel_objects 
-allowing the user to control how many processors work on each halo. 
-The final catalog is written to disk in the output directory given 
-when the 
-:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` 
+The save_halos keyword determines whether the actual Halo objects
+are saved after analysis on them has completed or whether just the
+contents of their quantities dicts will be retained for creating the
+final catalog. The looping over halos uses a call to parallel_objects
+allowing the user to control how many processors work on each halo.
+The final catalog is written to disk in the output directory given
+when the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
 object was created.
 
-All callbacks, quantities, and filters are stored in an actions list, 
-meaning that they are executed in the same order in which they were added. 
-This enables the use of simple, reusable, single action callbacks that 
-depend on each other. This also prevents unnecessary computation by allowing 
-the user to add filters at multiple stages to skip remaining analysis if it 
+All callbacks, quantities, and filters are stored in an actions list,
+meaning that they are executed in the same order in which they were added.
+This enables the use of simple, reusable, single action callbacks that
+depend on each other. This also prevents unnecessary computation by allowing
+the user to add filters at multiple stages to skip remaining analysis if it
 is not warranted.
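+
+For example, a minimal sketch of a run that keeps only the quantities dicts
+(``save_halos`` is described above; the ``njobs`` keyword being passed
+through to ``parallel_objects`` is an assumption here):
+
+.. code-block:: python
+
+   hc.create(save_halos=False, njobs=-1)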
 
 Saving and Reloading Halo Catalogs
 ----------------------------------
 
-A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` 
-saved to disk can be reloaded as a yt dataset with the 
-standard call to load. Any side data, such as profiles, can be reloaded 
-with a ``load_profiles`` callback and a call to 
+A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+saved to disk can be reloaded as a yt dataset with the
+standard call to load. Any side data, such as profiles, can be reloaded
+with a ``load_profiles`` callback and a call to
 :func:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog.load`.
 
 .. code-block:: python
@@ -261,5 +306,5 @@
 Worked Example of Halo Catalog in Action
 ----------------------------------------
 
-For a full example of how to use these methods together see 
+For a full example of how to use these methods together see
 :ref:`halo-analysis-example`.

diff -r 22159764d57bfdcfbaeefc270f3b912ce66cfaf4 -r a4624d509d5e9736b8153b22dc40c69eb2cdb792 doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -3,16 +3,16 @@
 Halo Finding
 ============
 
-There are three methods of finding particle haloes in yt. The 
-default method is called HOP, a method described 
-in `Eisenstein and Hut (1998) 
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic 
-friends-of-friends (e.g. `Efstathiou et al. (1985) 
-<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo 
-finder is also implemented. Finally Rockstar (`Behroozi et a. 
-(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is 
-a 6D-phase space halo finder developed by Peter Behroozi that 
-excels in finding subhalos and substrcture, but does not allow 
+There are three methods of finding particle haloes in yt. The
+default method is called HOP, a method described
+in `Eisenstein and Hut (1998)
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic
+friends-of-friends (e.g. `Efstathiou et al. (1985)
+<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo
+finder is also implemented. Finally, Rockstar (`Behroozi et al.
+(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is
+a 6D-phase space halo finder developed by Peter Behroozi that
+excels in finding subhalos and substructure, but does not allow
 multiple particle masses.
 
 .. _hop:
@@ -20,32 +20,32 @@
 HOP
 ---
 
-The version of HOP used in yt is an upgraded version of the 
-`publicly available HOP code 
-<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support 
-for 64-bit floats and integers has been added, as well as 
-parallel analysis through spatial decomposition. HOP builds 
+The version of HOP used in yt is an upgraded version of the
+`publicly available HOP code
+<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support
+for 64-bit floats and integers has been added, as well as
+parallel analysis through spatial decomposition. HOP builds
 groups in this fashion:
 
-#. Estimates the local density at each particle using a 
+#. Estimates the local density at each particle using a
    smoothing kernel.
 
-#. Builds chains of linked particles by 'hopping' from one 
-   particle to its densest neighbor. A particle which is 
+#. Builds chains of linked particles by 'hopping' from one
+   particle to its densest neighbor. A particle which is
    its own densest neighbor is the end of the chain.
 
-#. All chains that share the same densest particle are 
+#. All chains that share the same densest particle are
    grouped together.
 
-#. Groups are included, linked together, or discarded 
+#. Groups are included, linked together, or discarded
    depending on the user-supplied overdensity
    threshold parameter. The default is 160.0 (see the sketch below).
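+
+A minimal sketch of passing the threshold through ``finder_kwargs`` (the
+keyword name ``threshold`` is an assumption here):
+
+.. code-block:: python
+
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop',
+                    finder_kwargs={'threshold': 160.0})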
 
 Please see the `HOP method paper 
 <http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for 
 full details and the 
-:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHalo` and
-:class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
+:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`
+documentation.
 
 .. _fof:
 
@@ -53,36 +53,36 @@
 ---
 
 A basic friends-of-friends halo finder is included.  See the
-:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHalo` and
-:class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
+:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`
+documentation.
 
 .. _rockstar:
 
 Rockstar Halo Finding
 ---------------------
 
-Rockstar uses an adaptive hierarchical refinement of friends-of-friends 
-groups in six phase-space dimensions and one time dimension, which 
+Rockstar uses an adaptive hierarchical refinement of friends-of-friends
+groups in six phase-space dimensions and one time dimension, which
 allows for robust (grid-independent, shape-independent, and noise-
-resilient) tracking of substructure. The code is prepackaged with yt, 
-but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead 
+resilient) tracking of substructure. The code is prepackaged with yt,
+but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
 developer is Peter Behroozi, and the methods are described in `Behroozi
-et al. 2011 <http://arxiv.org/abs/1110.4372>`_. 
-In order to run the Rockstar halo finder in yt, make sure you've 
+et al. 2011 <http://arxiv.org/abs/1110.4372>`_.
+In order to run the Rockstar halo finder in yt, make sure you've
 :ref:`installed it so that it can integrate with yt <rockstar-installation>`.
 
-At the moment, Rockstar does not support multiple particle masses, 
-instead using a fixed particle mass. This will not affect most dark matter 
+At the moment, Rockstar does not support multiple particle masses,
+instead using a fixed particle mass. This will not affect most dark matter
 simulations, but does make it less useful for finding halos from the stellar
-mass. In simulations where the highest-resolution particles all have the 
+mass. In simulations where the highest-resolution particles all have the
 same mass (i.e., zoom-in grid-based simulations), one can set up a particle
 filter to select the lowest mass particles and perform the halo finding
-only on those.  See the this cookbook recipe for an example: 
+only on those.  See this cookbook recipe for an example:
 :ref:`cookbook-rockstar-nested-grid`.
 
-To run the Rockstar Halo finding, you must launch python with MPI and 
-parallelization enabled. While Rockstar itself does not require MPI to run, 
-the MPI libraries allow yt to distribute particle information across multiple 
+To run the Rockstar Halo finding, you must launch python with MPI and
+parallelization enabled. While Rockstar itself does not require MPI to run,
+the MPI libraries allow yt to distribute particle information across multiple
 nodes.
 
 .. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
@@ -92,23 +92,23 @@
    For example, here is how Rockstar might be called using 24 cores:
    ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
 
-The script above configures the Halo finder, launches a server process which 
-disseminates run information and coordinates writer-reader processes. 
-Afterwards, it launches reader and writer tasks, filling the available MPI 
-slots, which alternately read particle information and analyze for halo 
+The script above configures the Halo finder, launches a server process which
+disseminates run information and coordinates writer-reader processes.
+Afterwards, it launches reader and writer tasks, filling the available MPI
+slots, which alternately read particle information and analyze for halo
 content.
 
-The RockstarHaloFinder class has these options that can be supplied to the 
+The RockstarHaloFinder class has these options that can be supplied to the
 halo catalog through the ``finder_kwargs`` argument:
 
-* ``dm_type``, the index of the dark matter particle. Default is 1. 
+* ``dm_type``, the index of the dark matter particle. Default is 1.
 * ``outbase``, This is where the out*list files that Rockstar makes should be
   placed. Default is 'rockstar_halos'.
-* ``num_readers``, the number of reader tasks (which are idle most of the 
+* ``num_readers``, the number of reader tasks (which are idle most of the
   time). Default is 1.
 * ``num_writers``, the number of writer tasks (which are fed particles and
-  do most of the analysis). Default is MPI_TASKS-num_readers-1. 
-  If left undefined, the above options are automatically 
+  do most of the analysis). Default is MPI_TASKS-num_readers-1.
+  If left undefined, the above options are automatically
   configured from the number of available MPI tasks.
 * ``force_res``, the resolution that Rockstar uses for various calculations
   and smoothing lengths. This is in units of Mpc/h.
@@ -130,14 +130,14 @@
   this option can save disk access time if there are no star particles
   (or other non-dark matter particles) in the simulation. Default: ``False``.
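+
+Putting a few of these together (the values are illustrative only):
+
+.. code-block:: python
+
+   hc = HaloCatalog(data_ds=data_ds, finder_method='rockstar',
+                    finder_kwargs={'num_readers': 1, 'num_writers': 4,
+                                   'outbase': 'rockstar_halos'})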
 
-Rockstar dumps halo information in a series of text (halo*list and 
-out*list) and binary (halo*bin) files inside the ``outbase`` directory. 
-We use the halo list classes to recover the information. 
+Rockstar dumps halo information in a series of text (halo*list and
+out*list) and binary (halo*bin) files inside the ``outbase`` directory.
+We use the halo list classes to recover the information.
 
 Inside the ``outbase`` directory there is a text file named ``datasets.txt``
 that records the connection between ds names and the Rockstar file names.
 
-For more information, see the 
+For more information, see the
 :class:`~yt.analysis_modules.halo_finding.halo_objects.RockstarHalo` and
 :class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
 
@@ -146,9 +146,9 @@
 Parallel HOP and FOF
 --------------------
 
-Both the HOP and FoF halo finders can run in parallel using simple 
-spatial decomposition. In order to run them in parallel it is helpful 
-to understand how it works. Below in the first plot (i) is a simplified 
+Both the HOP and FoF halo finders can run in parallel using simple
+spatial decomposition. In order to run them in parallel it is helpful
+to understand how it works. Below in the first plot (i) is a simplified
+depiction of three haloes labeled 1, 2 and 3:
 
 .. image:: _images/ParallelHaloFinder.png
@@ -156,35 +156,35 @@
 
 Halo 3 is twice reflected around the periodic boundary conditions.
 
-In (ii), the volume has been sub-divided into four equal subregions, 
-A,B,C and D, shown with dotted lines. Notice that halo 2 is now in 
-two different subregions, C and D, and that halo 3 is now in three, 
+In (ii), the volume has been sub-divided into four equal subregions,
+A, B, C and D, shown with dotted lines. Notice that halo 2 is now in
+two different subregions, C and D, and that halo 3 is now in three,
 A, B and D. If the halo finder is run on these four separate subregions,
-halo 1 is be identified as a single halo, but haloes 2 and 3 are split 
-up into multiple haloes, which is incorrect. The solution is to give 
+halo 1 will be identified as a single halo, but haloes 2 and 3 are split
+up into multiple haloes, which is incorrect. The solution is to give
 each subregion padding to oversample into neighboring regions.
 
-In (iii), subregion C has oversampled into the other three regions, 
-with the periodic boundary conditions taken into account, shown by 
+In (iii), subregion C has oversampled into the other three regions,
+with the periodic boundary conditions taken into account, shown by
 dot-dashed lines. The other subregions oversample in a similar way.
 
-The halo finder is then run on each padded subregion independently 
-and simultaneously. By oversampling like this, haloes 2 and 3 will 
-both be enclosed fully in at least one subregion and identified 
+The halo finder is then run on each padded subregion independently
+and simultaneously. By oversampling like this, haloes 2 and 3 will
+both be enclosed fully in at least one subregion and identified
 completely.
 
-Haloes identified with centers of mass inside the padded part of a 
-subregion are thrown out, eliminating the problem of halo duplication. 
+Haloes identified with centers of mass inside the padded part of a
+subregion are thrown out, eliminating the problem of halo duplication.
 The centers for the three haloes are shown with stars. Halo 1 will
 belong to subregion A, 2 to C and 3 to B.
 
-To run with parallel halo finding, you must supply a value for 
-padding in the finder_kwargs argument. The ``padding`` parameter 
-is in simulation units and defaults to 0.02. This parameter is how 
-much padding is added to each of the six sides of a subregion. 
-This value should be 2x-3x larger than the largest expected halo 
-in the simulation. It is unlikely, of course, that the largest 
-object in the simulation will be on a subregion boundary, but there 
+To run with parallel halo finding, you must supply a value for
+padding in the finder_kwargs argument. The ``padding`` parameter
+is in simulation units and defaults to 0.02. This parameter is how
+much padding is added to each of the six sides of a subregion.
+This value should be 2x-3x larger than the largest expected halo
+in the simulation. It is unlikely, of course, that the largest
+object in the simulation will be on a subregion boundary, but there
 is no way of knowing before the halo finder is run.
 
 .. code-block:: python
@@ -197,10 +197,10 @@
   # --or--
   hc = HaloCatalog(data_ds = ds, finder_method = 'fof', finder_kwargs={'padding':0.02})
 
-In general, a little bit of padding goes a long way, and too much 
-just slows down the analysis and doesn't improve the answer (but 
-doesn't change it).  It may be worth your time to run the parallel 
-halo finder at a few paddings to find the right amount, especially 
+In general, a little bit of padding goes a long way, and too much
+just slows down the analysis and doesn't improve the answer (but
+doesn't change it).  It may be worth your time to run the parallel
+halo finder at a few paddings to find the right amount, especially
 if you're analyzing many similar datasets.
 
 .. _rockstar-installation:
@@ -209,15 +209,15 @@
 ---------------------
 
 Because of changes in the Rockstar API over time, yt only currently works with
-a slightly older version of Rockstar.  This version of Rockstar has been 
-slightly patched and modified to run as a library inside of yt. By default it 
-is not installed with yt, but installation is very easy.  The 
-:ref:`install-script` used to install yt from source has a line: 
+a slightly older version of Rockstar.  This version of Rockstar has been
+slightly patched and modified to run as a library inside of yt. By default it
+is not installed with yt, but installation is very easy.  The
+:ref:`install-script` used to install yt from source has a line:
 ``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
 rerun this installer script over the top of an existing installation, and
-it will only install components missing from the existing installation.  
+it will only install components missing from the existing installation.
 You can do this as follows.  Put your freshly modified install_script in
-the parent directory of the yt installation directory (e.g. the parent of 
+the parent directory of the yt installation directory (e.g. the parent of
 ``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
 
 .. code-block:: bash

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/44e07a0982c3/
Changeset:   44e07a0982c3
Branch:      yt
User:        atmyers
Date:        2016-04-14 23:39:41+00:00
Summary:     merging.
Affected #:  1 file



https://bitbucket.org/yt_analysis/yt/commits/f34729a43751/
Changeset:   f34729a43751
Branch:      yt
User:        atmyers
Date:        2016-04-19 17:27:30+00:00
Summary:     Adding shaders for OpenGL mesh rendering.
Affected #:  4 files

diff -r 44e07a0982c39e904b80818e815bf31d099f8477 -r f34729a437518b3350d7e698ae5adddc75dc55e4 yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -533,6 +533,10 @@
             GL.glGenerateMipmap(GL.GL_TEXTURE_3D)
 
 
+class MeshScene(SceneComponent):
+    def __init__(self):
+        super(MeshScene, self).__init__()
+
 class SceneGraph(SceneComponent):
     """A basic OpenGL render for IDV.
 

diff -r 44e07a0982c39e904b80818e815bf31d099f8477 -r f34729a437518b3350d7e698ae5adddc75dc55e4 yt/visualization/volume_rendering/shader_objects.py
--- a/yt/visualization/volume_rendering/shader_objects.py
+++ b/yt/visualization/volume_rendering/shader_objects.py
@@ -29,7 +29,7 @@
 class ShaderProgram(object):
     '''
     Wrapper class that compiles and links vertex and fragment shaders
-    into shader program.
+    into a shader program.
 
     Parameters
     ----------
@@ -269,3 +269,13 @@
     '''A second pass vertex shader that performs no operations on vertices'''
     _source = "passthrough.vertexshader"
     _shader_name = "passthrough.v"
+
+class MeshVertexShader(VertexShader):
+    '''A vertex shader used for unstructured mesh rendering.'''
+    _source = "mesh.vertexshader"
+    _shader_name = "mesh.v"
+
+class MeshFragmentShader(FragmentShader):
+    '''A fragment shader used for unstructured mesh rendering.'''
+    _source = "mesh.fragmentshader"
+    _shader_name = "mesh.f"

diff -r 44e07a0982c39e904b80818e815bf31d099f8477 -r f34729a437518b3350d7e698ae5adddc75dc55e4 yt/visualization/volume_rendering/shaders/mesh.fragmentshader
--- /dev/null
+++ b/yt/visualization/volume_rendering/shaders/mesh.fragmentshader
@@ -0,0 +1,8 @@
+#version 330 core
+
+in vec3 fragmentColor;
+out vec3 color;
+void main()
+{
+    color = fragmentColor;
+}

diff -r 44e07a0982c39e904b80818e815bf31d099f8477 -r f34729a437518b3350d7e698ae5adddc75dc55e4 yt/visualization/volume_rendering/shaders/mesh.vertexshader
--- /dev/null
+++ b/yt/visualization/volume_rendering/shaders/mesh.vertexshader
@@ -0,0 +1,11 @@
+#version 330 core
+
+layout(location = 0) in vec3 vertexPosition_modelspace;
+layout(location = 1) in vec3 vertexColor;
+out vec3 fragmentColor;
+uniform mat4 model_to_clip;
+void main()
+{
+    gl_Position = model_to_clip * vec4(vertexPosition_modelspace, 1);
+    fragmentColor = vertexColor;
+}


https://bitbucket.org/yt_analysis/yt/commits/7bed94207db7/
Changeset:   7bed94207db7
Branch:      yt
User:        atmyers
Date:        2016-04-19 19:58:18+00:00
Summary:     a sorta functional opengl mesh rendering mode
Affected #:  2 files

diff -r f34729a437518b3350d7e698ae5adddc75dc55e4 -r 7bed94207db7dabcbf72ca90d445d4a47a49437a yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -18,6 +18,7 @@
 from collections import OrderedDict
 import matplotlib.cm as cm
 import numpy as np
+import ctypes
 
 from yt.utilities.math_utils import \
     get_translate_matrix, \
@@ -28,7 +29,8 @@
     quaternion_to_rotation_matrix, \
     rotation_matrix_to_quaternion
 from .shader_objects import known_shaders, ShaderProgram
-
+from yt.convenience import load
+from yt.visualization.image_writer import apply_colormap
 
 bbox_vertices = np.array(
       [[ 0.,  0.,  0.,  1.],
@@ -77,6 +79,15 @@
      +1.0, +1.0, 0.0], dtype=np.float32
 )
 
+triangulate_hex = np.array([
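+    # two triangles per quadrilateral face: 12 triangles cover a hexahedron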
+    [0, 2, 1], [0, 3, 2],
+    [4, 5, 6], [4, 6, 7],
+    [0, 1, 5], [0, 5, 4],
+    [1, 2, 6], [1, 6, 5],
+    [0, 7, 3], [0, 4, 7],
+    [3, 6, 2], [3, 7, 6]]
+)
+
 
 class IDVCamera(object):
     '''Camera object used in the Interactive Data Visualization
@@ -534,8 +545,107 @@
 
 
 class MeshScene(SceneComponent):
+
     def __init__(self):
         super(MeshScene, self).__init__()
+        self.set_shader("mesh.v")
+        self.set_shader("mesh.f")
+
+        self.data_source = None
+        self.redraw = True
+        self.camera = None
+
+        GL.glEnable(GL.GL_DEPTH_TEST)
+        GL.glDepthFunc(GL.GL_LESS)
+        GL.glEnable(GL.GL_CULL_FACE)
+
+        fn = "MOOSE_sample_data/out.e-s010"
+        vertices, colors, indices = self.read_mesh_data(fn)
+
+        self._initialize_vertex_array("mesh_info")
+        GL.glBindVertexArray(self.vert_arrays["mesh_info"])
+
+        self.add_vert_attrib("vertex_buffer", vertices, vertices.size)
+        self.add_vert_attrib("color_buffer", colors, colors.size)
+
+        self.vert_attrib["element_buffer"] = (GL.glGenBuffers(1), indices.size)
+        GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.vert_attrib["element_buffer"][0])
+        GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL.GL_STATIC_DRAW)
+
+        self.transform_matrix = GL.glGetUniformLocation(self.program.program,
+                                                        "model_to_clip")
+
+    def set_camera(self, camera):
+        r""" Sets the camera orientation for the entire scene.
+
+        Parameters
+        ----------
+        camera : Camera
+
+        """
+        self.camera = camera
+        self.redraw = True
+
+    def update_minmax(self):
+        pass
+
+    def read_mesh_data(self, fn):
+        """
+        
+        This reads in the ExodusII output file specified by fn and converts
+        the data to a form that can be fed into OpenGL.
+        
+        """
+
+        ds = load(fn)
+
+        vertices = ds.index.meshes[0].connectivity_coords
+        indices  = ds.index.meshes[0].connectivity_indices - 1
+        data = ds._vars['vals_nod_var2'][:]
+
+        colors = apply_colormap(data, (0.0, 2.0), 'algae') / 255.0
+        colors = colors.squeeze()
+        colors = colors[:, 0:3]
+
+        tri_indices = []
+        for elem in indices:
+            for tri in triangulate_hex:
+                tri_indices.append(elem[tri])
+
+        tri_indices = np.array(tri_indices)
+
+        v = vertices.astype(np.float32).flatten()
+        c = colors.astype(np.float32).flatten()
+        i = tri_indices.astype(np.uint32).flatten()
+
+        return v, c, i
+
+    def run_program(self):
+        """ Renders one frame of the scene. """
+        with self.program.enable():
+            GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
+            projection_matrix = self.camera.projection_matrix
+            view_matrix = self.camera.view_matrix
+            model_to_clip = np.dot(projection_matrix, view_matrix)
+            GL.glUniformMatrix4fv(self.transform_matrix, 1, True, model_to_clip)
+
+            GL.glEnableVertexAttribArray(0)
+            GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vert_attrib["vertex_buffer"][0])
+            GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, False, 0, ctypes.c_void_p(0))
+
+            GL.glEnableVertexAttribArray(1)
+            GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vert_attrib["color_buffer"][0])
+            GL.glVertexAttribPointer(1, 3, GL.GL_FLOAT, False, 0, ctypes.c_void_p(0))
+
+            GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.vert_attrib["element_buffer"][0])
+            GL.glDrawElements(GL.GL_TRIANGLES, self.vert_attrib["element_buffer"][1],
+                              GL.GL_UNSIGNED_INT, ctypes.c_void_p(0))
+
+            GL.glDisableVertexAttribArray(0)
+            GL.glDisableVertexAttribArray(1)
+
+    render = run_program
+
 
 class SceneGraph(SceneComponent):
     """A basic OpenGL render for IDV.

diff -r f34729a437518b3350d7e698ae5adddc75dc55e4 -r 7bed94207db7dabcbf72ca90d445d4a47a49437a yt/visualization/volume_rendering/interactive_vr_helpers.py
--- a/yt/visualization/volume_rendering/interactive_vr_helpers.py
+++ b/yt/visualization/volume_rendering/interactive_vr_helpers.py
@@ -53,6 +53,7 @@
     '''
 
     from .interactive_vr import SceneGraph, BlockCollection, TrackballCamera
+    from .interactive_vr import MeshScene
     from .interactive_loop import RenderingContext
 
     if isinstance(data_source, Dataset):
@@ -77,13 +78,16 @@
     near_plane = 0.01 * far_plane
 
     rc = RenderingContext(*window_size)
-    scene = SceneGraph()
-    collection = BlockCollection()
-    collection.add_data(dobj, field)
-    scene.add_collection(collection)
+#    scene = SceneGraph()
+#    collection = BlockCollection()
+#    collection.add_data(dobj, field)
+#    scene.add_collection(collection)
+
+    scene = MeshScene()
 
     c = TrackballCamera(position=cam_position, focus=cam_focus, near_plane=near_plane,
                         far_plane=far_plane, aspect_ratio=aspect_ratio)
+    c = TrackballCamera(position=np.array([np.sqrt(100.0/3.0), np.sqrt(100.0/3.0) + 1.0, np.sqrt(100.0/3.0)]), focus=np.array([0.0, 0.0, 0.0]))
     rc.start_loop(scene, c)
 
 


https://bitbucket.org/yt_analysis/yt/commits/7e7e0389e0bb/
Changeset:   7e7e0389e0bb
Branch:      yt
User:        atmyers
Date:        2016-04-19 20:11:06+00:00
Summary:     more triangulation arrays
Affected #:  1 file

diff -r 7bed94207db7dabcbf72ca90d445d4a47a49437a -r 7e7e0389e0bbafeaf232a25f6aaaf6566b9f3855 yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -88,6 +88,18 @@
     [3, 6, 2], [3, 7, 6]]
 )
 
+triangulate_tetra = np.array([
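+    # the four triangular faces of a tetrahedron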
+    [0, 1, 2], [0, 1, 3],
+    [0, 2, 3], [1, 2, 3]]
+)
+
+triangulate_wedge = np.array([
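+    # two triangular faces plus three quads split in two: 8 triangles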
+    [0, 1, 2], [0, 3, 1],
+    [1, 3, 4], [0, 2, 3],
+    [2, 5, 3], [1, 4, 2],
+    [2, 4, 5], [3, 5, 4]]
+)
+
 
 class IDVCamera(object):
     '''Camera object used in the Interactive Data Visualization


https://bitbucket.org/yt_analysis/yt/commits/2a5f88cba93d/
Changeset:   2a5f88cba93d
Branch:      yt
User:        atmyers
Date:        2016-04-19 20:11:33+00:00
Summary:     comment typo fix
Affected #:  1 file

diff -r 7e7e0389e0bbafeaf232a25f6aaaf6566b9f3855 -r 2a5f88cba93d19236db270de07f7899464205d5d yt/utilities/lib/mesh_construction.h
--- a/yt/utilities/lib/mesh_construction.h
+++ b/yt/utilities/lib/mesh_construction.h
@@ -37,7 +37,7 @@
   {-1, -1, -1}
 };
 
-// Triangule wedges
+// Triangulate wedges
 int triangulate_wedge[MAX_NUM_TRI][3] = {
   {0, 1, 2},
   {0, 3, 1},


https://bitbucket.org/yt_analysis/yt/commits/32baa8674386/
Changeset:   32baa8674386
Branch:      yt
User:        atmyers
Date:        2016-04-19 21:02:58+00:00
Summary:     generalize the OpenGL mesh rendering to work with all the different first-order mesh types.
Affected #:  2 files

diff -r 2a5f88cba93d19236db270de07f7899464205d5d -r 32baa867438633f64d379871ceb6e917fe3890f8 yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -558,7 +558,7 @@
 
 class MeshScene(SceneComponent):
 
-    def __init__(self):
+    def __init__(self, ds, field):
         super(MeshScene, self).__init__()
         self.set_shader("mesh.v")
         self.set_shader("mesh.f")
@@ -571,8 +571,7 @@
         GL.glDepthFunc(GL.GL_LESS)
         GL.glEnable(GL.GL_CULL_FACE)
 
-        fn = "MOOSE_sample_data/out.e-s010"
-        vertices, colors, indices = self.read_mesh_data(fn)
+        vertices, colors, indices = self.read_mesh_data(ds, field)
 
         self._initialize_vertex_array("mesh_info")
         GL.glBindVertexArray(self.vert_arrays["mesh_info"])
@@ -601,27 +600,38 @@
     def update_minmax(self):
         pass
 
-    def read_mesh_data(self, fn):
+    def read_mesh_data(self, ds, field):
         """
         
-        This reads in the ExodusII output file specified by fn and converts
-        the data to form that can be fed in to OpenGL.
+        This reads the mesh data into a form that can be fed in to OpenGL.
         
         """
 
-        ds = load(fn)
+        ftype, fname = field
+        mesh_id = int(ftype[-1])
+        offset = ds.index.io._INDEX_OFFSET
+        field_ind = ds.index.io.node_fields.index(fname)
 
-        vertices = ds.index.meshes[0].connectivity_coords
-        indices  = ds.index.meshes[0].connectivity_indices - 1
-        data = ds._vars['vals_nod_var2'][:]
+        vertices = ds.index.meshes[mesh_id-1].connectivity_coords
+        indices  = ds.index.meshes[mesh_id-1].connectivity_indices - offset
+        data = ds._vars['vals_nod_var%d' % (field_ind + 1)][:]
 
         colors = apply_colormap(data, (0.0, 2.0), 'algae') / 255.0
         colors = colors.squeeze()
         colors = colors[:, 0:3]
 
+        if indices.shape[1] == 8:
+            tri_array = triangulate_hex
+        elif indices.shape[1] == 4:
+            tri_array = triangulate_tetra
+        elif indices.shape[1] == 6:
+            tri_array = triangulate_wedge
+        else:
+            raise NotImplementedError
+
         tri_indices = []
         for elem in indices:
-            for tri in triangulate_hex:
+            for tri in tri_array:
                 tri_indices.append(elem[tri])
 
         tri_indices = np.array(tri_indices)

diff -r 2a5f88cba93d19236db270de07f7899464205d5d -r 32baa867438633f64d379871ceb6e917fe3890f8 yt/visualization/volume_rendering/interactive_vr_helpers.py
--- a/yt/visualization/volume_rendering/interactive_vr_helpers.py
+++ b/yt/visualization/volume_rendering/interactive_vr_helpers.py
@@ -52,8 +52,8 @@
 
     '''
 
-    from .interactive_vr import SceneGraph, BlockCollection, TrackballCamera
-    from .interactive_vr import MeshScene
+    from .interactive_vr import SceneGraph, BlockCollection, TrackballCamera, \
+        MeshScene
     from .interactive_loop import RenderingContext
 
     if isinstance(data_source, Dataset):
@@ -73,21 +73,23 @@
     if cam_focus is None:
         cam_focus = dobj.ds.domain_center
 
+    rc = RenderingContext(*window_size)
+
+    if hasattr(dobj.ds.index, "meshes"):
+        cam_position = 3.0*dobj.ds.domain_right_edge
+        scene = MeshScene(data_source, field)
+    else:
+        scene = SceneGraph()
+        collection = BlockCollection()
+        collection.add_data(dobj, field)
+        scene.add_collection(collection)
+
     aspect_ratio = window_size[1] / window_size[0]
     far_plane = np.linalg.norm(cam_focus - cam_position) * 2.0
     near_plane = 0.01 * far_plane
 
-    rc = RenderingContext(*window_size)
-#    scene = SceneGraph()
-#    collection = BlockCollection()
-#    collection.add_data(dobj, field)
-#    scene.add_collection(collection)
-
-    scene = MeshScene()
-
     c = TrackballCamera(position=cam_position, focus=cam_focus, near_plane=near_plane,
                         far_plane=far_plane, aspect_ratio=aspect_ratio)
-    c = TrackballCamera(position=np.array([np.sqrt(100.0/3.0), np.sqrt(100.0/3.0) + 1.0, np.sqrt(100.0/3.0)]), focus=np.array([0.0, 0.0, 0.0]))
     rc.start_loop(scene, c)
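
The tri_indices loop above converts an (n_elements, verts_per_element) connectivity array into the flat uint32 triangle index buffer that glDrawElements consumes. A standalone sketch of the same idea, assuming a zero-based connectivity array and one of the triangulation tables defined earlier:

    import numpy as np

    def triangulate(indices, tri_array):
        # indices: (n_elements, verts_per_element), zero-based connectivity
        # tri_array: (n_tris_per_element, 3) local triangulation table
        tri_indices = []
        for elem in indices:
            for tri in tri_array:
                tri_indices.append(elem[tri])
        return np.array(tri_indices, dtype=np.uint32).flatten()

The same loop serves hexes, tets, and wedges; only the lookup table changes, which is what the dispatch on indices.shape[1] selects.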
 
 


https://bitbucket.org/yt_analysis/yt/commits/a361e8612702/
Changeset:   a361e8612702
Branch:      yt
User:        atmyers
Date:        2016-04-19 21:11:26+00:00
Summary:     smarter default field detection for ExodusII frontend
Affected #:  1 file

diff -r 32baa867438633f64d379871ceb6e917fe3890f8 -r a361e8612702f5a204b6a120e4b1e5c886203ac1 yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -154,7 +154,8 @@
                                               units_override=units_override)
         self.index_filename = filename
         self.storage_filename = storage_filename
-        self.default_field = ("connect1", "diffused")
+        self.default_field = [f for f in self.field_list 
+                              if f[0] == 'connect1'][-1]
 
     def _set_code_unit_attributes(self):
         # This is where quantities are created that represent the various
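
The comprehension picks the last field defined on the first mesh instead of hard-coding a name that may not exist in a given output. With a hypothetical field_list it behaves like this:

    field_list = [('connect1', 'convected'), ('connect1', 'diffused'),
                  ('connect2', 'diffused')]
    default_field = [f for f in field_list if f[0] == 'connect1'][-1]
    # -> ('connect1', 'diffused')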


https://bitbucket.org/yt_analysis/yt/commits/a25591c49616/
Changeset:   a25591c49616
Branch:      yt
User:        atmyers
Date:        2016-04-19 21:33:26+00:00
Summary:     use reasonable min/max values for colormap.
Affected #:  1 file

diff -r a361e8612702f5a204b6a120e4b1e5c886203ac1 -r a25591c496164b6283a7290bf17100c6a3615b49 yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -571,7 +571,7 @@
         GL.glDepthFunc(GL.GL_LESS)
         GL.glEnable(GL.GL_CULL_FACE)
 
-        vertices, colors, indices = self.read_mesh_data(ds, field)
+        vertices, colors, indices = self.get_mesh_data(ds, field)
 
         self._initialize_vertex_array("mesh_info")
         GL.glBindVertexArray(self.vert_arrays["mesh_info"])
@@ -600,7 +600,7 @@
     def update_minmax(self):
         pass
 
-    def read_mesh_data(self, ds, field):
+    def get_mesh_data(self, ds, field):
         """
         
         This reads the mesh data into a form that can be fed in to OpenGL.
@@ -616,7 +616,7 @@
         indices  = ds.index.meshes[mesh_id-1].connectivity_indices - offset
         data = ds._vars['vals_nod_var%d' % (field_ind + 1)][:]
 
-        colors = apply_colormap(data, (0.0, 2.0), 'algae') / 255.0
+        colors = apply_colormap(data, (data.min(), data.max()), 'algae') / 255.0
         colors = colors.squeeze()
         colors = colors[:, 0:3]
 


https://bitbucket.org/yt_analysis/yt/commits/c7b0c1d27150/
Changeset:   c7b0c1d27150
Branch:      yt
User:        atmyers
Date:        2016-04-20 21:07:51+00:00
Summary:     fix winding order for tetrahedral and wedge elements, OpenGL only.
Affected #:  1 file

diff -r a25591c496164b6283a7290bf17100c6a3615b49 -r c7b0c1d27150ff640070e6ca95c4d3799b26402c yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -89,15 +89,15 @@
 )
 
 triangulate_tetra = np.array([
-    [0, 1, 2], [0, 1, 3],
-    [0, 2, 3], [1, 2, 3]]
+    [0, 1, 3], [2, 3, 1],
+    [0, 3, 2], [0, 2, 1]]
 )
 
 triangulate_wedge = np.array([
-    [0, 1, 2], [0, 3, 1],
-    [1, 3, 4], [0, 2, 3],
-    [2, 5, 3], [1, 4, 2],
-    [2, 4, 5], [3, 5, 4]]
+    [3, 0, 1], [4, 3, 1],
+    [2, 5, 4], [2, 4, 1],
+    [0, 3, 2], [2, 3, 5],
+    [3, 4, 5], [0, 2, 1]]
 )
 
 
@@ -570,6 +570,7 @@
         GL.glEnable(GL.GL_DEPTH_TEST)
         GL.glDepthFunc(GL.GL_LESS)
         GL.glEnable(GL.GL_CULL_FACE)
+        GL.glCullFace(GL.GL_BACK)
 
         vertices, colors, indices = self.get_mesh_data(ds, field)
 
@@ -614,7 +615,7 @@
 
         vertices = ds.index.meshes[mesh_id-1].connectivity_coords
         indices  = ds.index.meshes[mesh_id-1].connectivity_indices - offset
-        data = ds._vars['vals_nod_var%d' % (field_ind + 1)][:]
+        data = ds._vars['vals_nod_var%d' % (field_ind + 1)][ds.step]
 
         colors = apply_colormap(data, (data.min(), data.max()), 'algae') / 255.0
         colors = colors.squeeze()
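
With face culling enabled (glCullFace(GL_BACK) above), OpenGL discards triangles whose vertices wind clockwise on screen, so every row in the tables must list its vertices counter-clockwise as seen from outside the element. A small numpy check of that convention; the reference vertex coordinates for the tetrahedron are an assumption:

    import numpy as np

    # assumed local vertex layout for a unit tetrahedron
    verts = np.array([[0., 0., 0.], [1., 0., 0.],
                      [0., 1., 0.], [0., 0., 1.]])
    centroid = verts.mean(axis=0)

    triangulate_tetra = np.array([
        [0, 1, 3], [2, 3, 1],
        [0, 3, 2], [0, 2, 1]]
    )

    def faces_outward(tri):
        a, b, c = verts[tri]
        normal = np.cross(b - a, c - a)           # right-hand rule
        return np.dot(normal, a - centroid) > 0   # points away from center?

    assert all(faces_outward(tri) for tri in triangulate_tetra)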


https://bitbucket.org/yt_analysis/yt/commits/cf50b06fc61a/
Changeset:   cf50b06fc61a
Branch:      yt
User:        atmyers
Date:        2016-04-20 23:25:48+00:00
Summary:     docstring typo fix.
Affected #:  1 file

diff -r c7b0c1d27150ff640070e6ca95c4d3799b26402c -r cf50b06fc61a5d693de786b5ec90a1f5004d2991 yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -29,7 +29,6 @@
     quaternion_to_rotation_matrix, \
     rotation_matrix_to_quaternion
 from .shader_objects import known_shaders, ShaderProgram
-from yt.convenience import load
 from yt.visualization.image_writer import apply_colormap
 
 bbox_vertices = np.array(
@@ -753,7 +752,7 @@
 
 
     def setup_fb(self, width, height):
-        '''Setups FrameBuffer that will be used as container
+        '''Sets up FrameBuffer that will be used as container
            for 1 pass of rendering'''
         # Clean up old FB and Texture
         if self.fb_texture is not None and \


https://bitbucket.org/yt_analysis/yt/commits/b8e81a38bd4e/
Changeset:   b8e81a38bd4e
Branch:      yt
User:        atmyers
Date:        2016-04-20 23:57:55+00:00
Summary:     use the fragment shader to handle the color bar
Affected #:  3 files

diff -r cf50b06fc61a5d693de786b5ec90a1f5004d2991 -r b8e81a38bd4e4e8befeac9c36f5b94852bda916f yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -29,7 +29,6 @@
     quaternion_to_rotation_matrix, \
     rotation_matrix_to_quaternion
 from .shader_objects import known_shaders, ShaderProgram
-from yt.visualization.image_writer import apply_colormap
 
 bbox_vertices = np.array(
       [[ 0.,  0.,  0.,  1.],
@@ -239,7 +238,7 @@
             z = 0.0
         else:
             z = np.sqrt(1.0 - mag**2)
-        return np.array([x, -y, z])
+        return np.array([x, y, -z])
 
     def update_orientation(self, start_x, start_y, end_x, end_y):
         old = self._map_to_surface(start_x, start_y)
@@ -565,19 +564,20 @@
         self.data_source = None
         self.redraw = True
         self.camera = None
+        self.cmap_texture = None
 
         GL.glEnable(GL.GL_DEPTH_TEST)
         GL.glDepthFunc(GL.GL_LESS)
         GL.glEnable(GL.GL_CULL_FACE)
         GL.glCullFace(GL.GL_BACK)
 
-        vertices, colors, indices = self.get_mesh_data(ds, field)
+        vertices, data, indices = self.get_mesh_data(ds, field)
 
         self._initialize_vertex_array("mesh_info")
         GL.glBindVertexArray(self.vert_arrays["mesh_info"])
 
         self.add_vert_attrib("vertex_buffer", vertices, vertices.size)
-        self.add_vert_attrib("color_buffer", colors, colors.size)
+        self.add_vert_attrib("data_buffer", data, data.size)
 
         self.vert_attrib["element_buffer"] = (GL.glGenBuffers(1), indices.size)
         GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.vert_attrib["element_buffer"][0])
@@ -586,6 +586,9 @@
         self.transform_matrix = GL.glGetUniformLocation(self.program.program,
                                                         "model_to_clip")
 
+        self.cmin = data.min()
+        self.cmax = data.max()
+
     def set_camera(self, camera):
         r""" Sets the camera orientation for the entire scene.
 
@@ -595,8 +598,35 @@
 
         """
         self.camera = camera
+        self.camera.cmap_min = float(self.cmin)
+        self.camera.cmap_max = float(self.cmax)
         self.redraw = True
 
+    def setup_cmap_tex(self):
+        '''Creates 1D texture that will hold colormap in framebuffer'''
+        self.cmap_texture = GL.glGenTextures(1)   # create target texture
+        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
+        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
+        GL.glTexParameterf(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)
+        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
+        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
+        GL.glTexImage1D(GL.GL_TEXTURE_1D, 0, GL.GL_RGBA, 256,
+                        0, GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
+        GL.glBindTexture(GL.GL_TEXTURE_1D, 0)
+
+    def update_cmap_tex(self):
+        '''Updates 1D texture with colormap that's used in framebuffer'''
+        if self.camera is None or not self.camera.cmap_new:
+            return
+
+        if self.cmap_texture is None:
+            self.setup_cmap_tex()
+
+        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
+        GL.glTexSubImage1D(GL.GL_TEXTURE_1D, 0, 0, 256,
+                           GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
+        self.camera.cmap_new = False
+
     def update_minmax(self):
         pass
 
@@ -616,10 +646,6 @@
         indices  = ds.index.meshes[mesh_id-1].connectivity_indices - offset
         data = ds._vars['vals_nod_var%d' % (field_ind + 1)][ds.step]
 
-        colors = apply_colormap(data, (data.min(), data.max()), 'algae') / 255.0
-        colors = colors.squeeze()
-        colors = colors[:, 0:3]
-
         if indices.shape[1] == 8:
             tri_array = triangulate_hex
         elif indices.shape[1] == 4:
@@ -637,27 +663,38 @@
         tri_indices = np.array(tri_indices)
 
         v = vertices.astype(np.float32).flatten()
-        c = colors.astype(np.float32).flatten()
+        d = data.astype(np.float32).flatten()
         i = tri_indices.astype(np.uint32).flatten()
 
-        return v, c, i
+        return v, d, i
 
     def run_program(self):
         """ Renders one frame of the scene. """
         with self.program.enable():
+
+            # Handle colormap
+            self.update_cmap_tex()
+
             GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
             projection_matrix = self.camera.projection_matrix
             view_matrix = self.camera.view_matrix
             model_to_clip = np.dot(projection_matrix, view_matrix)
             GL.glUniformMatrix4fv(self.transform_matrix, 1, True, model_to_clip)
 
+            GL.glActiveTexture(GL.GL_TEXTURE1)
+            GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
+
+            self.program._set_uniform("cmap", 0)
+            self.program._set_uniform("cmap_min", self.camera.cmap_min)
+            self.program._set_uniform("cmap_max", self.camera.cmap_max)
+
             GL.glEnableVertexAttribArray(0)
             GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vert_attrib["vertex_buffer"][0])
             GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, False, 0, ctypes.c_void_p(0))
 
             GL.glEnableVertexAttribArray(1)
-            GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vert_attrib["color_buffer"][0])
-            GL.glVertexAttribPointer(1, 3, GL.GL_FLOAT, False, 0, ctypes.c_void_p(0))
+            GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vert_attrib["data_buffer"][0])
+            GL.glVertexAttribPointer(1, 1, GL.GL_FLOAT, False, 0, ctypes.c_void_p(0))
 
             GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.vert_attrib["element_buffer"][0])
             GL.glDrawElements(GL.GL_TRIANGLES, self.vert_attrib["element_buffer"][1],
@@ -735,7 +772,6 @@
                         0, GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
         GL.glBindTexture(GL.GL_TEXTURE_1D, 0)
 
-
     def update_cmap_tex(self):
         '''Updates 1D texture with colormap that's used in framebuffer'''
         if self.camera is None or not self.camera.cmap_new:
@@ -750,7 +786,6 @@
         GL.glBindTexture(GL.GL_TEXTURE_1D, 0)
         self.camera.cmap_new = False
 
-
     def setup_fb(self, width, height):
         '''Sets up FrameBuffer that will be used as container
            for 1 pass of rendering'''

diff -r cf50b06fc61a5d693de786b5ec90a1f5004d2991 -r b8e81a38bd4e4e8befeac9c36f5b94852bda916f yt/visualization/volume_rendering/shaders/mesh.fragmentshader
--- a/yt/visualization/volume_rendering/shaders/mesh.fragmentshader
+++ b/yt/visualization/volume_rendering/shaders/mesh.fragmentshader
@@ -1,8 +1,17 @@
 #version 330 core
 
-in vec3 fragmentColor;
-out vec3 color;
+in float fragmentData;
+out vec4 color;
+
+uniform sampler1D cmap;
+uniform float cmap_min;
+uniform float cmap_max;
+
 void main()
 {
-    color = fragmentColor;
+    float data = fragmentData;
+    float cm = cmap_min;
+    float cp = cmap_max;
+
+    color = texture(cmap, (data - cm) / (cp - cm));
 }

diff -r cf50b06fc61a5d693de786b5ec90a1f5004d2991 -r b8e81a38bd4e4e8befeac9c36f5b94852bda916f yt/visualization/volume_rendering/shaders/mesh.vertexshader
--- a/yt/visualization/volume_rendering/shaders/mesh.vertexshader
+++ b/yt/visualization/volume_rendering/shaders/mesh.vertexshader
@@ -1,11 +1,11 @@
 #version 330 core
 
 layout(location = 0) in vec3 vertexPosition_modelspace;
-layout(location = 1) in vec3 vertexColor;
-out vec3 fragmentColor;
+layout(location = 1) in float vertexData;
+out float fragmentData;
 uniform mat4 model_to_clip;
 void main()
 {
     gl_Position = model_to_clip * vec4(vertexPosition_modelspace, 1);
-    fragmentColor = vertexColor;
+    fragmentData = vertexData;
 }
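
Instead of baking RGB colors into a vertex buffer, the vertex shader now passes the raw data value through, and the fragment shader normalizes it to [0, 1] and samples the 1D colormap texture. The equivalent lookup in numpy terms, with cmap_table standing in for the 256-entry RGBA texture:

    import numpy as np

    def shade(data, cmap_min, cmap_max, cmap_table):
        # cmap_table: (256, 4) float RGBA, i.e. the 1D texture contents
        t = (data - cmap_min) / (cmap_max - cmap_min)
        idx = np.clip((t * 255).astype(int), 0, 255)
        return cmap_table[idx]   # nearest lookup; GL_LINEAR would interpolate

This keeps the geometry static while letting the colormap and its bounds change per frame through the cmap, cmap_min, and cmap_max uniforms.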


https://bitbucket.org/yt_analysis/yt/commits/360d0b6bd43a/
Changeset:   360d0b6bd43a
Branch:      yt
User:        atmyers
Date:        2016-04-21 00:36:33+00:00
Summary:     A little refactoring to avoid duplicating the colorbar code.
Affected #:  2 files

diff -r b8e81a38bd4e4e8befeac9c36f5b94852bda916f -r 360d0b6bd43a50e8f0f832789a3909f0add43363 yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -553,18 +553,59 @@
                         GL.GL_RED, GL.GL_FLOAT, n_data.T)
             GL.glGenerateMipmap(GL.GL_TEXTURE_3D)
 
+class ColorBarSceneComponent(SceneComponent):
+    ''' 
 
-class MeshScene(SceneComponent):
+    A class for scene components that apply colorbars using a 1D texture. 
+
+    '''
+
+    def __init__(self):
+        super(ColorBarSceneComponent, self).__init__()
+        self.camera = None
+        self.cmap_texture = None
+
+    def set_camera(self, camera):
+        pass
+
+    def update_minmax(self):
+        pass
+
+    def setup_cmap_tex(self):
+        '''Creates 1D texture that will hold colormap in framebuffer'''
+        self.cmap_texture = GL.glGenTextures(1)   # create target texture
+        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
+        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
+        GL.glTexParameterf(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)
+        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
+        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
+        GL.glTexImage1D(GL.GL_TEXTURE_1D, 0, GL.GL_RGBA, 256,
+                        0, GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
+        GL.glBindTexture(GL.GL_TEXTURE_1D, 0)
+
+    def update_cmap_tex(self):
+        '''Updates 1D texture with colormap that's used in framebuffer'''
+        if self.camera is None or not self.camera.cmap_new:
+            return
+
+        if self.cmap_texture is None:
+            self.setup_cmap_tex()
+
+        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
+        GL.glTexSubImage1D(GL.GL_TEXTURE_1D, 0, 0, 256,
+                           GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
+        self.camera.cmap_new = False
+    
+
+class MeshSceneComponent(ColorBarSceneComponent):
 
     def __init__(self, ds, field):
-        super(MeshScene, self).__init__()
+        super(MeshSceneComponent, self).__init__()
         self.set_shader("mesh.v")
         self.set_shader("mesh.f")
 
         self.data_source = None
         self.redraw = True
-        self.camera = None
-        self.cmap_texture = None
 
         GL.glEnable(GL.GL_DEPTH_TEST)
         GL.glDepthFunc(GL.GL_LESS)
@@ -602,34 +643,6 @@
         self.camera.cmap_max = float(self.cmax)
         self.redraw = True
 
-    def setup_cmap_tex(self):
-        '''Creates 1D texture that will hold colormap in framebuffer'''
-        self.cmap_texture = GL.glGenTextures(1)   # create target texture
-        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
-        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
-        GL.glTexParameterf(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)
-        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
-        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
-        GL.glTexImage1D(GL.GL_TEXTURE_1D, 0, GL.GL_RGBA, 256,
-                        0, GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
-        GL.glBindTexture(GL.GL_TEXTURE_1D, 0)
-
-    def update_cmap_tex(self):
-        '''Updates 1D texture with colormap that's used in framebuffer'''
-        if self.camera is None or not self.camera.cmap_new:
-            return
-
-        if self.cmap_texture is None:
-            self.setup_cmap_tex()
-
-        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
-        GL.glTexSubImage1D(GL.GL_TEXTURE_1D, 0, 0, 256,
-                           GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
-        self.camera.cmap_new = False
-
-    def update_minmax(self):
-        pass
-
     def get_mesh_data(self, ds, field):
         """
         
@@ -706,7 +719,7 @@
     render = run_program
 
 
-class SceneGraph(SceneComponent):
+class SceneGraph(ColorBarSceneComponent):
     """A basic OpenGL render for IDV.
 
     The SceneGraph class is the primary driver behind creating a IDV rendering.
@@ -727,8 +740,6 @@
         self.collections = []
         self.fbo = None
         self.fb_texture = None
-        self.cmap_texture = None
-        self.camera = None
         self.shader_program = None
         self.fb_shader_program = None
         self.min_val, self.max_val = 1e60, -1e60
@@ -760,32 +771,6 @@
 
         self.setup_fb(self.width, self.height)
 
-    def setup_cmap_tex(self):
-        '''Creates 1D texture that will hold colormap in framebuffer'''
-        self.cmap_texture = GL.glGenTextures(1)   # create target texture
-        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
-        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
-        GL.glTexParameterf(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)
-        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
-        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
-        GL.glTexImage1D(GL.GL_TEXTURE_1D, 0, GL.GL_RGBA, 256,
-                        0, GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
-        GL.glBindTexture(GL.GL_TEXTURE_1D, 0)
-
-    def update_cmap_tex(self):
-        '''Updates 1D texture with colormap that's used in framebuffer'''
-        if self.camera is None or not self.camera.cmap_new:
-            return
-
-        if self.cmap_texture is None:
-            self.setup_cmap_tex()
-
-        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
-        GL.glTexSubImage1D(GL.GL_TEXTURE_1D, 0, 0, 256,
-                           GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
-        GL.glBindTexture(GL.GL_TEXTURE_1D, 0)
-        self.camera.cmap_new = False
-
     def setup_fb(self, width, height):
         '''Sets up FrameBuffer that will be used as container
            for 1 pass of rendering'''
@@ -841,7 +826,6 @@
         status = GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)
         assert status == GL.GL_FRAMEBUFFER_COMPLETE, status
 
-
     def add_collection(self, collection):
         r"""Adds a block collection to the scene. Collections must not overlap.
 

diff -r b8e81a38bd4e4e8befeac9c36f5b94852bda916f -r 360d0b6bd43a50e8f0f832789a3909f0add43363 yt/visualization/volume_rendering/interactive_vr_helpers.py
--- a/yt/visualization/volume_rendering/interactive_vr_helpers.py
+++ b/yt/visualization/volume_rendering/interactive_vr_helpers.py
@@ -52,8 +52,10 @@
 
     '''
 
-    from .interactive_vr import SceneGraph, BlockCollection, TrackballCamera, \
-        MeshScene
+    from .interactive_vr import SceneGraph, \
+        BlockCollection, \
+        TrackballCamera, \
+        MeshSceneComponent
     from .interactive_loop import RenderingContext
 
     if isinstance(data_source, Dataset):
@@ -76,8 +78,10 @@
     rc = RenderingContext(*window_size)
 
     if hasattr(dobj.ds.index, "meshes"):
+        # unstructured mesh datasets tend to have tight
+        # domain boundaries, do some extra padding here.
         cam_position = 3.0*dobj.ds.domain_right_edge
-        scene = MeshScene(data_source, field)
+        scene = MeshSceneComponent(data_source, field)
     else:
         scene = SceneGraph()
         collection = BlockCollection()
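
The shared base class assumes the camera exposes a cmap attribute laid out as 256 RGBA floats, plus a cmap_new flag that triggers the re-upload in update_cmap_tex. One plausible way to build such an array with matplotlib (which interactive_vr.py imports as cm), shown as an assumption rather than the actual camera code:

    import numpy as np
    import matplotlib.cm as cm

    # sample a named colormap at 256 points -> (256, 4) float32 RGBA,
    # the layout glTexImage1D expects for the 1D texture
    cmap = np.array(cm.get_cmap('viridis')(np.linspace(0.0, 1.0, 256)),
                    dtype=np.float32)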


https://bitbucket.org/yt_analysis/yt/commits/723650b6cb91/
Changeset:   723650b6cb91
Branch:      yt
User:        atmyers
Date:        2016-04-21 00:37:12+00:00
Summary:     short docstring
Affected #:  1 file

diff -r 360d0b6bd43a50e8f0f832789a3909f0add43363 -r 723650b6cb919fc093579a4530e3feec2c9dafa4 yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -596,8 +596,12 @@
                            GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
         self.camera.cmap_new = False
     
+class MeshSceneComponent(ColorBarSceneComponent):
+    '''
 
-class MeshSceneComponent(ColorBarSceneComponent):
+    A scene component for representing unstructured mesh data.
+
+    '''
 
     def __init__(self, ds, field):
         super(MeshSceneComponent, self).__init__()


https://bitbucket.org/yt_analysis/yt/commits/78f85412d0cf/
Changeset:   78f85412d0cf
Branch:      yt
User:        atmyers
Date:        2016-04-21 00:41:47+00:00
Summary:     revert change to _map_to_surface
Affected #:  1 file

diff -r 723650b6cb919fc093579a4530e3feec2c9dafa4 -r 78f85412d0cf7c61a0f617ab1fec8476716f78b5 yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -238,7 +238,7 @@
             z = 0.0
         else:
             z = np.sqrt(1.0 - mag**2)
-        return np.array([x, y, -z])
+        return np.array([x, -y, z])
 
     def update_orientation(self, start_x, start_y, end_x, end_y):
         old = self._map_to_surface(start_x, start_y)
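
For context, _map_to_surface is the standard virtual-trackball mapping: a mouse position inside the unit disk is lifted onto a hemisphere, and positions outside it are clamped to the rim. A sketch matching the branch shown in the diff, with the mag computation filled in as an assumption:

    import numpy as np

    def map_to_surface(x, y):
        # x, y: mouse position in [-1, 1] normalized coordinates (assumed)
        mag = np.sqrt(x * x + y * y)
        if mag > 1.0:
            x, y = x / mag, y / mag   # clamp to the trackball rim
            z = 0.0
        else:
            z = np.sqrt(1.0 - mag ** 2)
        return np.array([x, -y, z])   # y flipped to match screen coordinates

Two such surface points from a mouse drag define the rotation that update_orientation applies to the camera.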


https://bitbucket.org/yt_analysis/yt/commits/b0ccd3fe75de/
Changeset:   b0ccd3fe75de
Branch:      yt
User:        atmyers
Date:        2016-04-21 22:47:20+00:00
Summary:     give streaming unstructured datasets a default field, too.
Affected #:  1 file

diff -r 78f85412d0cf7c61a0f617ab1fec8476716f78b5 -r b0ccd3fe75deab518d53917e45ed7309e145f93e yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1826,5 +1826,7 @@
 
     sds._node_fields = node_data[0].keys()
     sds._elem_fields = elem_data[0].keys()
+    sds.default_field = [f for f in sds.field_list 
+                         if f[0] == 'connect1'][-1]
 
     return sds


https://bitbucket.org/yt_analysis/yt/commits/d73edf0896fe/
Changeset:   d73edf0896fe
Branch:      yt
User:        atmyers
Date:        2016-04-21 23:03:32+00:00
Summary:     more general way of getting the vertex data in MeshSceneComponent.
Affected #:  2 files

diff -r b0ccd3fe75deab518d53917e45ed7309e145f93e -r d73edf0896fe44f200f52313110036692d1ce893 yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -603,7 +603,7 @@
 
     '''
 
-    def __init__(self, ds, field):
+    def __init__(self, data_source, field):
         super(MeshSceneComponent, self).__init__()
         self.set_shader("mesh.v")
         self.set_shader("mesh.f")
@@ -616,7 +616,7 @@
         GL.glEnable(GL.GL_CULL_FACE)
         GL.glCullFace(GL.GL_BACK)
 
-        vertices, data, indices = self.get_mesh_data(ds, field)
+        vertices, data, indices = self.get_mesh_data(data_source, field)
 
         self._initialize_vertex_array("mesh_info")
         GL.glBindVertexArray(self.vert_arrays["mesh_info"])
@@ -647,21 +647,25 @@
         self.camera.cmap_max = float(self.cmax)
         self.redraw = True
 
-    def get_mesh_data(self, ds, field):
+    def get_mesh_data(self, data_source, field):
         """
         
         This reads the mesh data into a form that can be fed in to OpenGL.
         
         """
 
+        # get mesh information
         ftype, fname = field
         mesh_id = int(ftype[-1])
-        offset = ds.index.io._INDEX_OFFSET
-        field_ind = ds.index.io.node_fields.index(fname)
+        mesh = data_source.ds.index.meshes[mesh_id-1]
+        offset = mesh._index_offset
+        vertices = mesh.connectivity_coords
+        indices  = mesh.connectivity_indices - offset
 
-        vertices = ds.index.meshes[mesh_id-1].connectivity_coords
-        indices  = ds.index.meshes[mesh_id-1].connectivity_indices - offset
-        data = ds._vars['vals_nod_var%d' % (field_ind + 1)][ds.step]
+        # get vertex data
+        data = data_source[field]
+        vertex_data = np.zeros(vertices.shape[0], dtype=data.dtype)
+        vertex_data[indices.flatten()] = data.flatten()
 
         if indices.shape[1] == 8:
             tri_array = triangulate_hex
@@ -676,11 +680,10 @@
         for elem in indices:
             for tri in tri_array:
                 tri_indices.append(elem[tri])
-
         tri_indices = np.array(tri_indices)
 
         v = vertices.astype(np.float32).flatten()
-        d = data.astype(np.float32).flatten()
+        d = vertex_data.astype(np.float32).flatten()
         i = tri_indices.astype(np.uint32).flatten()
 
         return v, d, i

diff -r b0ccd3fe75deab518d53917e45ed7309e145f93e -r d73edf0896fe44f200f52313110036692d1ce893 yt/visualization/volume_rendering/interactive_vr_helpers.py
--- a/yt/visualization/volume_rendering/interactive_vr_helpers.py
+++ b/yt/visualization/volume_rendering/interactive_vr_helpers.py
@@ -81,7 +81,7 @@
         # unstructured mesh datasets tend to have tight
         # domain boundaries, do some extra padding here.
         cam_position = 3.0*dobj.ds.domain_right_edge
-        scene = MeshSceneComponent(data_source, field)
+        scene = MeshSceneComponent(dobj, field)
     else:
         scene = SceneGraph()
         collection = BlockCollection()
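
The refactored get_mesh_data no longer reads the vals_nod_var arrays directly; it asks the data source for the field and scatters the returned values back onto the vertex array by connectivity. A toy illustration of that scatter, with made-up shapes:

    import numpy as np

    indices = np.array([[0, 1, 2], [2, 3, 4]])   # two elements, 5 vertices
    data = np.array([[1.0, 2.0, 3.0],            # per-element nodal values
                     [3.0, 4.0, 5.0]])

    vertex_data = np.zeros(5, dtype=data.dtype)
    vertex_data[indices.flatten()] = data.flatten()
    # -> [1. 2. 3. 4. 5.]; a vertex shared by elements takes the last write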


https://bitbucket.org/yt_analysis/yt/commits/1fd7b253a1e7/
Changeset:   1fd7b253a1e7
Branch:      yt
User:        atmyers
Date:        2016-05-04 18:21:53+00:00
Summary:     merging with tip.
Affected #:  70 files

diff -r d73edf0896fe44f200f52313110036692d1ce893 -r 1fd7b253a1e7441fec9974baa8a5953b459e1851 doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -1,394 +1,4 @@
-#
-# Hi there!  Welcome to the yt installation script.
-#
-# This script is designed to create a fully isolated Python installation
-# with the dependencies you need to run yt.
-#
-# This script is based on Conda, a distribution mechanism from Continuum
-# Analytics.  The process is as follows:
-#
-#  1. Download the appropriate Conda installation package
-#  2. Install Conda into the specified directory
-#  3. Install yt-specific dependencies
-#  4. Install yt
-#
-# There are a few options listed below, but by default, this will install
-# everything.  At the end, it will tell you what to do to use yt.
-#
-# By default this will install yt from source.
-#
-# If you experience problems, please visit the Help section at
-# http://yt-project.org.
-#
-DEST_SUFFIX="yt-conda"
-DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
-BRANCH="yt" # This is the branch to which we will forcibly update.
-INST_YT_SOURCE=1 # Do we do a source install of yt?
-INST_UNSTRUCTURED=1 # Do we want to build with unstructured mesh support?
-
-##################################################################
-#                                                                #
-# You will likely not have to modify anything below this region. #
-#                                                                #
-##################################################################
-
-LOG_FILE="`pwd`/yt_install.log"
-
-# Here is the idiom for redirecting to the log file:
-# ( SOMECOMMAND 2>&1 ) 1>> ${LOG_FILE} || do_exit
-
-MINICONDA_URLBASE="http://repo.continuum.io/miniconda"
-MINICONDA_VERSION="latest"
-YT_RECIPE_REPO="https://bitbucket.org/yt_analysis/yt_conda/raw/default"
-
-if [ $INST_UNSTRUCTURED -eq 1 ]
-then
-  if [ $INST_YT_SOURCE -eq 0 ]
-  then
-      echo "yt must be compiled from source to use the unstructured mesh support."
-      echo "Please set INST_YT_SOURCE to 1 and re-run."
-      exit 1
-  fi
-  if [ `uname` = "Darwin" ]
-  then
-      EMBREE="embree-2.8.0.x86_64.macosx"
-      EMBREE_URL="https://github.com/embree/embree/releases/download/v2.8.0/$EMBREE.tar.gz"
-  else
-      EMBREE="embree-2.8.0.x86_64.linux"
-      EMBREE_URL="https://github.com/embree/embree/releases/download/v2.8.0/$EMBREE.tar.gz"
-  fi
-  PYEMBREE_URL="https://github.com/scopatz/pyembree/archive/master.zip"
-fi
-
-function do_exit
-{
-    echo "********************************************"
-    echo "        FAILURE REPORT:"
-    echo "********************************************"
-    echo
-    tail -n 10 ${LOG_FILE}
-    echo
-    echo "********************************************"
-    echo "********************************************"
-    echo "Failure.  Check ${LOG_FILE}.  The last 10 lines are above."
-    exit 1
-}
-
-function log_cmd
-{
-    echo "EXECUTING:" >> ${LOG_FILE}
-    echo "  $*" >> ${LOG_FILE}
-    ( $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-}
-
-# These are needed to prevent pushd and popd from printing to stdout
-
-function pushd () {
-    command pushd "$@" > /dev/null
-}
-
-function popd () {
-    command popd "$@" > /dev/null
-}
-
-function get_ytdata
-{
-    echo "Downloading $1 from yt-project.org"
-    [ -e $1 ] && return
-    ${GETFILE} "http://yt-project.org/data/$1" || do_exit
-    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
-}
-
-function get_ytrecipe {
-    RDIR=${DEST_DIR}/src/yt-recipes/$1
-    mkdir -p ${RDIR}
-    pushd ${RDIR}
-    log_cmd ${GETFILE} ${YT_RECIPE_REPO}/$1/meta.yaml
-    log_cmd ${GETFILE} ${YT_RECIPE_REPO}/$1/build.sh
-    NEW_PKG=`conda build --output ${RDIR}`
-    log_cmd conda build --no-binstar-upload ${RDIR}
-    log_cmd conda install ${NEW_PKG}
-    popd
-}
-
-
-echo
-echo
-echo "========================================================================"
-echo
-echo "Hi there!  This is the yt installation script.  We're going to download"
-echo "some stuff and install it to create a self-contained, isolated"
-echo "environment for yt to run within."
-echo
-echo "This will install Miniconda from Continuum Analytics, the necessary"
-echo "packages to run yt, and create a self-contained environment for you to"
-echo "use yt.  Additionally, Conda itself provides the ability to install"
-echo "many other packages that can be used for other purposes using the"
-echo "'conda install' command."
-echo
-MYOS=`uname -s`       # A guess at the OS
-if [ $INST_YT_SOURCE -ne 0 ]
-then
-    if [ "${MYOS##Darwin}" != "${MYOS}" ]
-    then
-        echo "Looks like you're running on Mac OSX."
-        echo
-        echo "NOTE: you must have the Xcode command line tools installed."
-        echo
-        echo "The instructions for obtaining these tools varies according"
-        echo "to your exact OS version.  On older versions of OS X, you"
-        echo "must register for an account on the apple developer tools"
-        echo "website: https://developer.apple.com/downloads to obtain the"
-        echo "download link."
-        echo
-        echo "We have gathered some additional instructions for each"
-        echo "version of OS X below. If you have trouble installing yt"
-        echo "after following these instructions, don't hesitate to contact"
-        echo "the yt user's e-mail list."
-        echo
-        echo "You can see which version of OSX you are running by clicking"
-        echo "'About This Mac' in the apple menu on the left hand side of"
-        echo "menu bar.  We're assuming that you've installed all operating"
-        echo "system updates; if you have an older version, we suggest"
-        echo "running software update and installing all available updates."
-        echo
-        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
-        echo "Apple developer tools website."
-        echo
-        echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
-        echo "developer tools website.  You can either download the"
-        echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-        echo "Software Update to update to XCode 3.2.6 or"
-        echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
-        echo "bundle (4.1 GB)."
-        echo
-        echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
-        echo "(search for Xcode)."
-        echo "Alternatively, download the Xcode command line tools from"
-        echo "the Apple developer tools website."
-        echo
-        echo "OS X 10.8.4, 10.9, 10.10, and 10.11:"
-        echo "download the appropriate version of Xcode from the"
-        echo "mac app store (search for Xcode)."
-        echo
-        echo "Additionally, you will have to manually install the Xcode"
-        echo "command line tools."
-        echo
-        echo "For OS X 10.8, see:"
-        echo "http://stackoverflow.com/questions/9353444"
-        echo
-        echo "For OS X 10.9 and newer the command line tools can be installed"
-        echo "with the following command:"
-        echo "    xcode-select --install"
-    fi
-    if [ "${MYOS##Linux}" != "${MYOS}" ]
-    then
-        echo "Looks like you're on Linux."
-        echo
-        echo "Please make sure you have the developer tools for your OS "
-        echo "installed."
-        echo
-        if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
-        then
-            echo "Looks like you're on an OpenSUSE-compatible machine."
-            echo
-            echo "You need to have these packages installed:"
-            echo
-            echo "  * devel_C_C++"
-            echo "  * libuuid-devel"
-            echo "  * gcc-c++"
-            echo "  * chrpath"
-            echo
-            echo "You can accomplish this by executing:"
-            echo
-            echo "$ sudo zypper install -t pattern devel_C_C++"
-            echo "$ sudo zypper install gcc-c++ libuuid-devel zip"
-            echo "$ sudo zypper install chrpath"
-        fi
-        if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
-        then
-            echo "Looks like you're on an Ubuntu-compatible machine."
-            echo
-            echo "You need to have these packages installed:"
-            echo
-            echo "  * libssl-dev"
-            echo "  * build-essential"
-            echo "  * libncurses5"
-            echo "  * libncurses5-dev"
-            echo "  * uuid-dev"
-            echo "  * chrpath"
-            echo
-            echo "You can accomplish this by executing:"
-            echo
-            echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev chrpath"
-            echo
-        fi
-        echo
-        echo "If you are running on a supercomputer or other module-enabled"
-        echo "system, please make sure that the GNU module has been loaded."
-        echo
-    fi
-fi
-if [ "${MYOS##x86_64}" != "${MYOS}" ]
-then
-    MINICONDA_OS="Linux-x86_64"
-elif [ "${MYOS##i386}" != "${MYOS}" ]
-then
-    MINICONDA_OS="Linux-x86"
-elif [ "${MYOS##Darwin}" != "${MYOS}" ]
-then
-     MINICONDA_OS="MacOSX-x86_64"
-else
-    echo "Not sure which Linux distro you are running."
-    echo "Going with x86_64 architecture."
-    MINICONDA_OS="Linux-x86_64"
-fi
-echo
-echo "If you'd rather not continue, hit Ctrl-C."
-echo
-echo "========================================================================"
-echo
-read -p "[hit enter] "
-echo
-echo "Awesome!  Here we go."
-echo
-
-MINICONDA_PKG=Miniconda-${MINICONDA_VERSION}-${MINICONDA_OS}.sh
-
-if type -P wget &>/dev/null
-then
-    echo "Using wget"
-    export GETFILE="wget -nv -nc"
-else
-    echo "Using curl"
-    export GETFILE="curl -sSO"
-fi
-
-echo
-echo "Downloading ${MINICONDA_URLBASE}/${MINICONDA_PKG}"
-echo "Downloading ${MINICONDA_URLBASE}/${MINICONDA_PKG}" >> ${LOG_FILE}
-echo
-
-${GETFILE} ${MINICONDA_URLBASE}/${MINICONDA_PKG} || do_exit
-
-echo "Installing the Miniconda python environment."
-
-log_cmd bash ./${MINICONDA_PKG} -b -p $DEST_DIR
-
-# This we *do* need.
-export PATH=${DEST_DIR}/bin:$PATH
-
-echo "Installing the necessary packages for yt."
-echo "This may take a while, but don't worry.  yt loves you."
-
-declare -a YT_DEPS
-YT_DEPS+=('python')
-YT_DEPS+=('setuptools')
-YT_DEPS+=('numpy')
-YT_DEPS+=('jupyter')
-YT_DEPS+=('ipython')
-YT_DEPS+=('sphinx')
-YT_DEPS+=('h5py')
-YT_DEPS+=('matplotlib')
-YT_DEPS+=('cython')
-YT_DEPS+=('nose')
-YT_DEPS+=('conda-build')
-YT_DEPS+=('mercurial')
-YT_DEPS+=('sympy')
-
-if [ $INST_UNSTRUCTURED -eq 1 ]
-then
-  YT_DEPS+=('netcdf4')
-fi
-
-# Here is our dependency list for yt
-log_cmd conda update --yes conda
-
-log_cmd echo "DEPENDENCIES" ${YT_DEPS[@]}
-for YT_DEP in "${YT_DEPS[@]}"; do
-    echo "Installing $YT_DEP"
-    log_cmd conda install --yes ${YT_DEP}
-done
-
-if [ $INST_UNSTRUCTURED -eq 1 ]
-then
-
-  echo "Installing embree"
-  mkdir ${DEST_DIR}/src
-  cd ${DEST_DIR}/src
-  ( ${GETFILE} "$EMBREE_URL" 2>&1 ) 1>> ${LOG_FILE} || do_exit
-  log_cmd tar xfz ${EMBREE}.tar.gz
-  log_cmd mv ${DEST_DIR}/src/${EMBREE}/include/embree2 ${DEST_DIR}/include
-  log_cmd mv ${DEST_DIR}/src/${EMBREE}/lib/lib*.* ${DEST_DIR}/lib
-  if [ `uname` = "Darwin" ]
-  then
-    ln -s ${DEST_DIR}/lib/libembree.2.dylib ${DEST_DIR}/lib/libembree.dylib
-    install_name_tool -id ${DEST_DIR}/lib/libembree.2.dylib ${DEST_DIR}/lib/libembree.2.dylib
-  else
-    ln -s ${DEST_DIR}/lib/libembree.so.2 ${DEST_DIR}/lib/libembree.so
-  fi
-
-  echo "Installing pyembree from source"
-  ( ${GETFILE} "$PYEMBREE_URL" 2>&1 ) 1>> ${LOG_FILE} || do_exit
-  log_cmd unzip ${DEST_DIR}/src/master.zip
-  pushd ${DEST_DIR}/src/pyembree-master
-  log_cmd python setup.py install build_ext -I${DEST_DIR}/include -L${DEST_DIR}/lib
-  popd
-fi
-
-if [ $INST_YT_SOURCE -eq 0 ]
-then
-  echo "Installing yt"
-  log_cmd conda install --yes yt
-else
-    # We do a source install.
-    echo "Installing yt from source"
-    YT_DIR="${DEST_DIR}/src/yt-hg"
-    log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
-if [ $INST_UNSTRUCTURED -eq 1 ]
-then
-    echo $DEST_DIR > ${YT_DIR}/embree.cfg
-fi
-    pushd ${YT_DIR}
-    log_cmd python setup.py develop
-    popd
-fi
-
-echo
-echo
-echo "========================================================================"
-echo
-echo "yt and the Conda system are now installed in $DEST_DIR ."
-echo
-echo "You must now modify your PATH variable by prepending:"
-echo
-echo "   $DEST_DIR/bin"
-echo
-echo "On Bash-style shells you can copy/paste the following command to "
-echo "temporarily activate the yt installation:"
-echo
-echo "    export PATH=$DEST_DIR/bin:\$PATH"
-echo
-echo "and on csh-style shells:"
-echo
-echo "    setenv PATH $DEST_DIR/bin:\$PATH"
-echo
-echo "You can also update the init file appropriate for your shell to include"
-echo "the same command."
-echo
-echo "To get started with yt, check out the orientation:"
-echo
-echo "    http://yt-project.org/doc/orientation/"
-echo
-echo "For support, see the website and join the mailing list:"
-echo
-echo "    http://yt-project.org/"
-echo "    http://yt-project.org/data/      (Sample data)"
-echo "    http://yt-project.org/doc/       (Docs)"
-echo
-echo "    http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org"
-echo
-echo "========================================================================"
-echo
-echo "Oh, look at me, still talking when there's science to do!"
-echo "Good luck, and email the user list if you run into any problems."
+echo "This script has been deprecated."
+echo "You can now create a conda-based build using install_script.sh"
+echo "Please download that script and run it"
+exit 0

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/1e24bccc1675/
Changeset:   1e24bccc1675
Branch:      yt
User:        atmyers
Date:        2016-05-04 18:25:31+00:00
Summary:     restoring import that I removed during merge.
Affected #:  1 file

diff -r 1fd7b253a1e7441fec9974baa8a5953b459e1851 -r 1e24bccc167538a6cd372e0e1a622e2b929d1ee3 yt/visualization/volume_rendering/interactive_vr_helpers.py
--- a/yt/visualization/volume_rendering/interactive_vr_helpers.py
+++ b/yt/visualization/volume_rendering/interactive_vr_helpers.py
@@ -58,7 +58,8 @@
         raise ImportError("This functionality requires the cyglfw3 and PyOpenGL "
                           "packages to be installed.")
 
-    from .interactive_vr import SceneGraph, BlockCollection, TrackballCamera
+    from .interactive_vr import SceneGraph, BlockCollection, TrackballCamera, \
+        MeshSceneComponent
     from .interactive_loop import RenderingContext
 
     if isinstance(data_source, Dataset):


https://bitbucket.org/yt_analysis/yt/commits/d39510be5062/
Changeset:   d39510be5062
Branch:      yt
User:        ngoldbaum
Date:        2016-05-18 18:23:47+00:00
Summary:     Merged in atmyers/yt (pull request #2135)

Interactive Unstructured Mesh Rendering with OpenGL
Affected #:  9 files

diff -r ff0ec06d8bd66146811459f03f99c6cf7b7b0883 -r d39510be50625c14c6ee0ecb2d0195ff72a76dd8 yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -154,7 +154,8 @@
                                               units_override=units_override)
         self.index_filename = filename
         self.storage_filename = storage_filename
-        self.default_field = ("connect1", "diffused")
+        self.default_field = [f for f in self.field_list 
+                              if f[0] == 'connect1'][-1]
 
     def _set_code_unit_attributes(self):
         # This is where quantities are created that represent the various

diff -r ff0ec06d8bd66146811459f03f99c6cf7b7b0883 -r d39510be50625c14c6ee0ecb2d0195ff72a76dd8 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1826,5 +1826,7 @@
 
     sds._node_fields = node_data[0].keys()
     sds._elem_fields = elem_data[0].keys()
+    sds.default_field = [f for f in sds.field_list 
+                         if f[0] == 'connect1'][-1]
 
     return sds

diff -r ff0ec06d8bd66146811459f03f99c6cf7b7b0883 -r d39510be50625c14c6ee0ecb2d0195ff72a76dd8 yt/utilities/lib/mesh_construction.h
--- a/yt/utilities/lib/mesh_construction.h
+++ b/yt/utilities/lib/mesh_construction.h
@@ -37,7 +37,7 @@
   {-1, -1, -1}
 };
 
-// Triangule wedges
+// Triangulate wedges
 int triangulate_wedge[MAX_NUM_TRI][3] = {
   {0, 1, 2},
   {0, 3, 1},

diff -r ff0ec06d8bd66146811459f03f99c6cf7b7b0883 -r d39510be50625c14c6ee0ecb2d0195ff72a76dd8 yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -18,6 +18,7 @@
 from collections import OrderedDict
 import matplotlib.cm as cm
 import numpy as np
+import ctypes
 
 from yt.utilities.math_utils import \
     get_translate_matrix, \
@@ -29,7 +30,6 @@
     rotation_matrix_to_quaternion
 from .shader_objects import known_shaders, ShaderProgram
 
-
 bbox_vertices = np.array(
       [[ 0.,  0.,  0.,  1.],
        [ 0.,  0.,  1.,  1.],
@@ -77,6 +77,27 @@
      +1.0, +1.0, 0.0], dtype=np.float32
 )
 
+triangulate_hex = np.array([
+    [0, 2, 1], [0, 3, 2],
+    [4, 5, 6], [4, 6, 7],
+    [0, 1, 5], [0, 5, 4],
+    [1, 2, 6], [1, 6, 5],
+    [0, 7, 3], [0, 4, 7],
+    [3, 6, 2], [3, 7, 6]]
+)
+
+triangulate_tetra = np.array([
+    [0, 1, 3], [2, 3, 1],
+    [0, 3, 2], [0, 2, 1]]
+)
+
+triangulate_wedge = np.array([
+    [3, 0, 1], [4, 3, 1],
+    [2, 5, 4], [2, 4, 1],
+    [0, 3, 2], [2, 3, 5],
+    [3, 4, 5], [0, 2, 1]]
+)
+
 
 class IDVCamera(object):
     '''Camera object used in the Interactive Data Visualization
@@ -532,8 +553,180 @@
                         GL.GL_RED, GL.GL_FLOAT, n_data.T)
             GL.glGenerateMipmap(GL.GL_TEXTURE_3D)
 
+class ColorBarSceneComponent(SceneComponent):
+    ''' 
 
-class SceneGraph(SceneComponent):
+    A class for scene components that apply colorbars using a 1D texture. 
+
+    '''
+
+    def __init__(self):
+        super(ColorBarSceneComponent, self).__init__()
+        self.camera = None
+        self.cmap_texture = None
+
+    def set_camera(self, camera):
+        pass
+
+    def update_minmax(self):
+        pass
+
+    def setup_cmap_tex(self):
+        '''Creates 1D texture that will hold colormap in framebuffer'''
+        self.cmap_texture = GL.glGenTextures(1)   # create target texture
+        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
+        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
+        GL.glTexParameterf(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)
+        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
+        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
+        GL.glTexImage1D(GL.GL_TEXTURE_1D, 0, GL.GL_RGBA, 256,
+                        0, GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
+        GL.glBindTexture(GL.GL_TEXTURE_1D, 0)
+
+    def update_cmap_tex(self):
+        '''Updates 1D texture with colormap that's used in framebuffer'''
+        if self.camera is None or not self.camera.cmap_new:
+            return
+
+        if self.cmap_texture is None:
+            self.setup_cmap_tex()
+
+        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
+        GL.glTexSubImage1D(GL.GL_TEXTURE_1D, 0, 0, 256,
+                           GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
+        self.camera.cmap_new = False
+    
+class MeshSceneComponent(ColorBarSceneComponent):
+    '''
+
+    A scene component for representing unstructured mesh data.
+
+    '''
+
+    def __init__(self, data_source, field):
+        super(MeshSceneComponent, self).__init__()
+        self.set_shader("mesh.v")
+        self.set_shader("mesh.f")
+
+        self.data_source = None
+        self.redraw = True
+
+        GL.glEnable(GL.GL_DEPTH_TEST)
+        GL.glDepthFunc(GL.GL_LESS)
+        GL.glEnable(GL.GL_CULL_FACE)
+        GL.glCullFace(GL.GL_BACK)
+
+        vertices, data, indices = self.get_mesh_data(data_source, field)
+
+        self._initialize_vertex_array("mesh_info")
+        GL.glBindVertexArray(self.vert_arrays["mesh_info"])
+
+        self.add_vert_attrib("vertex_buffer", vertices, vertices.size)
+        self.add_vert_attrib("data_buffer", data, data.size)
+
+        self.vert_attrib["element_buffer"] = (GL.glGenBuffers(1), indices.size)
+        GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.vert_attrib["element_buffer"][0])
+        GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL.GL_STATIC_DRAW)
+
+        self.transform_matrix = GL.glGetUniformLocation(self.program.program,
+                                                        "model_to_clip")
+
+        self.cmin = data.min()
+        self.cmax = data.max()
+
+    def set_camera(self, camera):
+        r""" Sets the camera orientation for the entire scene.
+
+        Parameters
+        ----------
+        camera : Camera
+
+        """
+        self.camera = camera
+        self.camera.cmap_min = float(self.cmin)
+        self.camera.cmap_max = float(self.cmax)
+        self.redraw = True
+
+    def get_mesh_data(self, data_source, field):
+        """
+        
+        This reads the mesh data into a form that can be fed in to OpenGL.
+        
+        """
+
+        # get mesh information
+        ftype, fname = field
+        mesh_id = int(ftype[-1])
+        mesh = data_source.ds.index.meshes[mesh_id-1]
+        offset = mesh._index_offset
+        vertices = mesh.connectivity_coords
+        indices  = mesh.connectivity_indices - offset
+
+        # get vertex data
+        data = data_source[field]
+        vertex_data = np.zeros(vertices.shape[0], dtype=data.dtype)
+        vertex_data[indices.flatten()] = data.flatten()
+
+        if indices.shape[1] == 8:
+            tri_array = triangulate_hex
+        elif indices.shape[1] == 4:
+            tri_array = triangulate_tetra
+        elif indices.shape[1] == 6:
+            tri_array = triangulate_wedge
+        else:
+            raise NotImplementedError
+
+        tri_indices = []
+        for elem in indices:
+            for tri in tri_array:
+                tri_indices.append(elem[tri])
+        tri_indices = np.array(tri_indices)
+
+        v = vertices.astype(np.float32).flatten()
+        d = vertex_data.astype(np.float32).flatten()
+        i = tri_indices.astype(np.uint32).flatten()
+
+        return v, d, i
+
+    def run_program(self):
+        """ Renders one frame of the scene. """
+        with self.program.enable():
+
+            # Handle colormap
+            self.update_cmap_tex()
+
+            GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
+            projection_matrix = self.camera.projection_matrix
+            view_matrix = self.camera.view_matrix
+            model_to_clip = np.dot(projection_matrix, view_matrix)
+            GL.glUniformMatrix4fv(self.transform_matrix, 1, True, model_to_clip)
+
+            GL.glActiveTexture(GL.GL_TEXTURE1)
+            GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
+
+            self.program._set_uniform("cmap", 0)
+            self.program._set_uniform("cmap_min", self.camera.cmap_min)
+            self.program._set_uniform("cmap_max", self.camera.cmap_max)
+
+            GL.glEnableVertexAttribArray(0)
+            GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vert_attrib["vertex_buffer"][0])
+            GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, False, 0, ctypes.c_void_p(0))
+
+            GL.glEnableVertexAttribArray(1)
+            GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vert_attrib["data_buffer"][0])
+            GL.glVertexAttribPointer(1, 1, GL.GL_FLOAT, False, 0, ctypes.c_void_p(0))
+
+            GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.vert_attrib["element_buffer"][0])
+            GL.glDrawElements(GL.GL_TRIANGLES, self.vert_attrib["element_buffer"][1],
+                              GL.GL_UNSIGNED_INT, ctypes.c_void_p(0))
+
+            GL.glDisableVertexAttribArray(0)
+            GL.glDisableVertexAttribArray(1)
+
+    render = run_program
+
+
+class SceneGraph(ColorBarSceneComponent):
     """A basic OpenGL render for IDV.
 
     The SceneGraph class is the primary driver behind creating a IDV rendering.
@@ -554,8 +747,6 @@
         self.collections = []
         self.fbo = None
         self.fb_texture = None
-        self.cmap_texture = None
-        self.camera = None
         self.shader_program = None
         self.fb_shader_program = None
         self.min_val, self.max_val = 1e60, -1e60
@@ -587,36 +778,8 @@
 
         self.setup_fb(self.width, self.height)
 
-    def setup_cmap_tex(self):
-        '''Creates 1D texture that will hold colormap in framebuffer'''
-        self.cmap_texture = GL.glGenTextures(1)   # create target texture
-        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
-        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
-        GL.glTexParameterf(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)
-        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
-        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
-        GL.glTexImage1D(GL.GL_TEXTURE_1D, 0, GL.GL_RGBA, 256,
-                        0, GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
-        GL.glBindTexture(GL.GL_TEXTURE_1D, 0)
-
-
-    def update_cmap_tex(self):
-        '''Updates 1D texture with colormap that's used in framebuffer'''
-        if self.camera is None or not self.camera.cmap_new:
-            return
-
-        if self.cmap_texture is None:
-            self.setup_cmap_tex()
-
-        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
-        GL.glTexSubImage1D(GL.GL_TEXTURE_1D, 0, 0, 256,
-                           GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
-        GL.glBindTexture(GL.GL_TEXTURE_1D, 0)
-        self.camera.cmap_new = False
-
-
     def setup_fb(self, width, height):
-        '''Setups FrameBuffer that will be used as container
+    '''Sets up the FrameBuffer that will be used as a container
        for one pass of rendering'''
         # Clean up old FB and Texture
         if self.fb_texture is not None and \
@@ -670,7 +833,6 @@
         status = GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)
         assert status == GL.GL_FRAMEBUFFER_COMPLETE, status
 
-
     def add_collection(self, collection):
         r"""Adds a block collection to the scene. Collections must not overlap.
 

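The element-to-triangle expansion in the hunk above can be sketched in
isolation. The following is a minimal NumPy illustration, not yt's actual
code: the triangulate_tetra table here is a hypothetical stand-in for the
real one shipped with yt, and the snippet shows how each element's
connectivity row is expanded into the flat uint32 index buffer handed to
glDrawElements:

    import numpy as np

    # Hypothetical stand-in for triangulate_tetra: each row lists the local
    # vertex indices of one triangular face of a 4-node tetrahedron.
    triangulate_tetra = np.array([[0, 1, 2],
                                  [0, 1, 3],
                                  [0, 2, 3],
                                  [1, 2, 3]])

    # One tetrahedral element referencing four global vertex ids.
    indices = np.array([[7, 2, 5, 9]])

    tri_indices = []
    for elem in indices:
        for tri in triangulate_tetra:
            tri_indices.append(elem[tri])  # local face ids -> global ids
    tri_indices = np.array(tri_indices, dtype=np.uint32)

    print(tri_indices.flatten())
    # [7 2 5 7 2 9 7 5 9 2 5 9]
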
diff -r ff0ec06d8bd66146811459f03f99c6cf7b7b0883 -r d39510be50625c14c6ee0ecb2d0195ff72a76dd8 yt/visualization/volume_rendering/interactive_vr_helpers.py
--- a/yt/visualization/volume_rendering/interactive_vr_helpers.py
+++ b/yt/visualization/volume_rendering/interactive_vr_helpers.py
@@ -58,7 +58,8 @@
         raise ImportError("This functionality requires the cyglfw3 and PyOpenGL "
                           "packages to be installed.")
 
-    from .interactive_vr import SceneGraph, BlockCollection, TrackballCamera
+    from .interactive_vr import SceneGraph, BlockCollection, TrackballCamera, \
+        MeshSceneComponent
     from .interactive_loop import RenderingContext
 
     if isinstance(data_source, Dataset):
@@ -78,16 +79,23 @@
     if cam_focus is None:
         cam_focus = dobj.ds.domain_center
 
+    rc = RenderingContext(*window_size)
+
+    if hasattr(dobj.ds.index, "meshes"):
+        # unstructured mesh datasets tend to have tight
+        # domain boundaries, so do some extra padding here.
+        cam_position = 3.0*dobj.ds.domain_right_edge
+        scene = MeshSceneComponent(dobj, field)
+    else:
+        scene = SceneGraph()
+        collection = BlockCollection()
+        collection.add_data(dobj, field)
+        scene.add_collection(collection)
+
     aspect_ratio = window_size[1] / window_size[0]
     far_plane = np.linalg.norm(cam_focus - cam_position) * 2.0
     near_plane = 0.01 * far_plane
 
-    rc = RenderingContext(*window_size)
-    scene = SceneGraph()
-    collection = BlockCollection()
-    collection.add_data(dobj, field)
-    scene.add_collection(collection)
-
     c = TrackballCamera(position=cam_position, focus=cam_focus, near_plane=near_plane,
                         far_plane=far_plane, aspect_ratio=aspect_ratio)
     rc.start_loop(scene, c)

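The camera setup in this helper is plain arithmetic on the domain
geometry. A small sketch with assumed values (a unit-cube domain, so the
cam_focus and domain_right_edge below are hypothetical) shows how the clip
planes fall out of the padded camera position:

    import numpy as np

    cam_focus = np.array([0.5, 0.5, 0.5])          # stands in for domain_center
    domain_right_edge = np.array([1.0, 1.0, 1.0])  # assumed unit cube

    # Unstructured meshes hug their data, so the camera is pulled back to
    # 3x the domain's right edge before the clip planes are derived.
    cam_position = 3.0 * domain_right_edge

    far_plane = np.linalg.norm(cam_focus - cam_position) * 2.0
    near_plane = 0.01 * far_plane
    print(near_plane, far_plane)  # ~0.0866 and ~8.66 for this domain
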
diff -r ff0ec06d8bd66146811459f03f99c6cf7b7b0883 -r d39510be50625c14c6ee0ecb2d0195ff72a76dd8 yt/visualization/volume_rendering/shader_objects.py
--- a/yt/visualization/volume_rendering/shader_objects.py
+++ b/yt/visualization/volume_rendering/shader_objects.py
@@ -29,7 +29,7 @@
 class ShaderProgram(object):
     '''
     Wrapper class that compiles and links vertex and fragment shaders
-    into shader program.
+    into a shader program.
 
     Parameters
     ----------
@@ -269,3 +269,13 @@
     '''A second-pass vertex shader that performs no operations on vertices'''
     _source = "passthrough.vertexshader"
     _shader_name = "passthrough.v"
+
+class MeshVertexShader(VertexShader):
+    '''A vertex shader used for unstructured mesh rendering.'''
+    _source = "mesh.vertexshader"
+    _shader_name = "mesh.v"
+
+class MeshFragmentShader(FragmentShader):
+    '''A fragment shader used for unstructured mesh rendering.'''
+    _source = "mesh.fragmentshader"
+    _shader_name = "mesh.f"

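In raw PyOpenGL, pairing the two new shader classes comes down to the
standard compile-and-link sequence. A rough sketch, assuming the sources
have already been read from the two shader files added below (ShaderProgram
presumably performs the equivalent steps internally):

    from OpenGL import GL
    from OpenGL.GL import shaders

    vertex_src = open("mesh.vertexshader").read()
    fragment_src = open("mesh.fragmentshader").read()

    vs = shaders.compileShader(vertex_src, GL.GL_VERTEX_SHADER)
    fs = shaders.compileShader(fragment_src, GL.GL_FRAGMENT_SHADER)
    program = shaders.compileProgram(vs, fs)  # links and validates

    GL.glUseProgram(program)
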
diff -r ff0ec06d8bd66146811459f03f99c6cf7b7b0883 -r d39510be50625c14c6ee0ecb2d0195ff72a76dd8 yt/visualization/volume_rendering/shaders/mesh.fragmentshader
--- /dev/null
+++ b/yt/visualization/volume_rendering/shaders/mesh.fragmentshader
@@ -0,0 +1,17 @@
+#version 330 core
+
+in float fragmentData;
+out vec4 color;
+
+uniform sampler1D cmap;
+uniform float cmap_min;
+uniform float cmap_max;
+
+void main()
+{
+    float data = fragmentData;
+    float cm = cmap_min;
+    float cp = cmap_max;
+
+    color = texture(cmap, (data - cm) / (cp - cm));
+}

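The fragment shader is a plain normalize-and-sample colormap lookup. A
CPU-side NumPy sketch of the same mapping, with a hypothetical 256-entry
grayscale ramp standing in for the cmap texture:

    import numpy as np

    # Hypothetical 256 x RGBA grayscale ramp in place of the cmap texture.
    cmap = np.tile(np.linspace(0.0, 1.0, 256)[:, None], (1, 4))
    cmap[:, 3] = 1.0  # fully opaque

    def sample_cmap(data, cmap_min, cmap_max):
        t = (data - cmap_min) / (cmap_max - cmap_min)
        t = np.clip(t, 0.0, 1.0)  # mimics GL_CLAMP_TO_EDGE
        return cmap[int(round(t * 255))]

    print(sample_cmap(0.5, 0.0, 1.0))  # ~[0.5 0.5 0.5 1.0]
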
diff -r ff0ec06d8bd66146811459f03f99c6cf7b7b0883 -r d39510be50625c14c6ee0ecb2d0195ff72a76dd8 yt/visualization/volume_rendering/shaders/mesh.vertexshader
--- /dev/null
+++ b/yt/visualization/volume_rendering/shaders/mesh.vertexshader
@@ -0,0 +1,11 @@
+#version 330 core
+
+layout(location = 0) in vec3 vertexPosition_modelspace;
+layout(location = 1) in float vertexData;
+out float fragmentData;
+uniform mat4 model_to_clip;
+void main()
+{
+    gl_Position = model_to_clip * vec4(vertexPosition_modelspace, 1);
+    fragmentData = vertexData;
+}

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.