[yt-svn] commit/yt-doc: 5 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Mon Oct 28 12:27:34 PDT 2013


5 new commits in yt-doc:

https://bitbucket.org/yt_analysis/yt-doc/commits/5f19e1124bbf/
Changeset:   5f19e1124bbf
User:        ngoldbaum
Date:        2013-10-28 02:50:51
Summary:     Adding a notebook sphinx extension.

This includes a new sphinx directive, notebook, that accepts the path to an
unevaluated notebook as an argument.  It outputs an evaluated version of the
notebook inline in the sphinx document.

There is a bit of monkeypatching for the notebook CSS we get from IPython's
html exporter.  The search-replaces I'm using may not work with pre-IPython 1.0
notebooks or future notebook versions.
Affected #:  2 files

diff -r a059cbb57ddf355a8e311bc1816c0728351deee4 -r 5f19e1124bbf7de8d9c1fbe170005384a3923077 extensions/notebook_sphinxext.py
--- /dev/null
+++ b/extensions/notebook_sphinxext.py
@@ -0,0 +1,151 @@
+import os, shutil, string
+from sphinx.util.compat import Directive
+from docutils import nodes
+from docutils.parsers.rst import directives
+from IPython.nbconvert import html, python
+from runipy.notebook_runner import NotebookRunner
+from jinja2 import FileSystemLoader
+
+class NotebookDirective(Directive):
+    """Insert an evaluated notebook into a document
+
+    This uses runipy and nbconvert to transform a path to an unevaluated notebook
+    into html suitable for embedding in a Sphinx document.
+    """
+    required_arguments = 1
+    optional_arguments = 0
+
+    def run(self):
+        # check if raw html is supported
+        if not self.state.document.settings.raw_enabled:
+            raise self.warning('"%s" directive disabled.' % self.name)
+
+        # get path to notebook
+        source_dir = os.path.dirname(
+            os.path.abspath(self.state.document.current_source))
+        nb_basename = os.path.basename(self.arguments[0])
+        rst_file = self.state_machine.document.attributes['source']
+        rst_dir = os.path.abspath(os.path.dirname(rst_file))
+        nb_abs_path = os.path.join(rst_dir, nb_basename)
+
+        # Move files around.
+        dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
+                                                os.path.dirname(nb_abs_path)))
+        if not os.path.exists(dest_dir):
+            os.makedirs(dest_dir)
+
+        rel_dir = os.path.relpath(rst_dir, setup.confdir)
+        place = os.path.join(dest_dir, rel_dir)
+        if not os.path.isdir(place): os.makedirs(place)
+        dest_path = os.path.join(place, nb_basename)
+        dest_path_eval = string.replace(dest_path, '.ipynb', '_evaluated.ipynb')
+        dest_path_script = string.replace(dest_path, '.ipynb', '.py')
+
+        # Copy unevaluated script
+        try:
+            shutil.copyfile(nb_abs_path, dest_path)
+        except IOError:
+            raise RuntimeError("Unable to copy notebook to build destination.")
+
+        # Create python script version
+        unevaluated_text = nb_to_html(nb_abs_path)
+        script_text = nb_to_python(nb_abs_path)
+        f = open(dest_path_script, 'w')
+        f.write(script_text.encode('utf8'))
+        f.close()
+
+        # Create evaluated version and save it to the dest path.
+        # Always use --pylab so figures appear inline
+        # perhaps this is questionable?
+        nb_runner = NotebookRunner(nb_in=nb_abs_path, pylab=True)
+        nb_runner.run_notebook()
+        nb_runner.save_notebook(dest_path_eval)
+        evaluated_text = nb_to_html(dest_path_eval)
+
+        # Create link to notebook and script files
+        link_rst = "(" + \
+                   formatted_link(dest_path) + "; " + \
+                   formatted_link(dest_path_eval) + "; " + \
+                   formatted_link(dest_path_script) + \
+                   ")"
+
+        self.state_machine.insert_input([link_rst], rst_file)
+
+        # create notebook node
+        attributes = {'format': 'html', 'source': 'nb_path'}
+        nb_node = nodes.raw('', evaluated_text, **attributes)
+        (nb_node.source, nb_node.line) = \
+            self.state_machine.get_source_and_line(self.lineno)
+
+        # add dependency
+        self.state.document.settings.record_dependencies.add(nb_abs_path)
+
+        return [nb_node]
+
+class notebook_node(nodes.raw):
+    pass
+
+def nb_to_python(nb_path):
+    """convert notebook to python script"""
+    exporter = python.PythonExporter()
+    output, resources = exporter.from_filename(nb_path)
+    return output
+
+def nb_to_html(nb_path):
+    """convert notebook to html"""
+    exporter = html.HTMLExporter(template_file='full')
+    output, resources = exporter.from_filename(nb_path)
+    header = output.split('<head>', 1)[1].split('</head>',1)[0]
+    body = output.split('<body>', 1)[1].split('</body>',1)[0]
+
+    # http://imgur.com/eR9bMRH
+    header = header.replace('<style', '<style scoped="scoped"')
+    header = header.replace('body{background-color:#ffffff;}\n', '')
+    header = header.replace('body{background-color:white;position:absolute;'
+                            'left:0px;right:0px;top:0px;bottom:0px;'
+                            'overflow:visible;}\n', '')
+    header = header.replace('body{margin:0;'
+                            'font-family:"Helvetica Neue",Helvetica,Arial,'
+                            'sans-serif;font-size:13px;line-height:20px;'
+                            'color:#000000;background-color:#ffffff;}', '')
+    header = header.replace('\na{color:#0088cc;text-decoration:none;}', '')
+    header = header.replace(
+        'a:focus{color:#005580;text-decoration:underline;}', '')
+    header = header.replace(
+        '\nh1,h2,h3,h4,h5,h6{margin:10px 0;font-family:inherit;font-weight:bold;'
+        'line-height:20px;color:inherit;text-rendering:optimizelegibility;}'
+        'h1 small,h2 small,h3 small,h4 small,h5 small,'
+        'h6 small{font-weight:normal;line-height:1;color:#999999;}'
+        '\nh1,h2,h3{line-height:40px;}\nh1{font-size:35.75px;}'
+        '\nh2{font-size:29.25px;}\nh3{font-size:22.75px;}'
+        '\nh4{font-size:16.25px;}\nh5{font-size:13px;}'
+        '\nh6{font-size:11.049999999999999px;}\nh1 small{font-size:22.75px;}'
+        '\nh2 small{font-size:16.25px;}\nh3 small{font-size:13px;}'
+        '\nh4 small{font-size:13px;}', '')
+    header = header.replace('background-color:#ffffff;', '', 1)
+
+    # concatenate raw html lines
+    lines = ['<div class="ipynotebook">']
+    lines.append(header)
+    lines.append(body)
+    lines.append('</div>')
+    return '\n'.join(lines)
+
+def formatted_link(path):
+    return "`%s <%s>`__" % (os.path.basename(path), path)
+
+def visit_notebook_node(self, node):
+    self.visit_raw(node)
+
+def depart_notebook_node(self, node):
+    self.depart_raw(node)
+
+def setup(app):
+    setup.app = app
+    setup.config = app.config
+    setup.confdir = app.confdir
+
+    app.add_node(notebook_node,
+                 html=(visit_notebook_node, depart_notebook_node))
+
+    app.add_directive('notebook', NotebookDirective)

diff -r a059cbb57ddf355a8e311bc1816c0728351deee4 -r 5f19e1124bbf7de8d9c1fbe170005384a3923077 source/conf.py
--- a/source/conf.py
+++ b/source/conf.py
@@ -30,7 +30,7 @@
 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
               'sphinx.ext.pngmath', 'sphinx.ext.viewcode',
               'sphinx.ext.autosummary', 'numpydocmod', 'youtube',
-              'yt_cookbook', 'yt_colormaps']
+              'yt_cookbook', 'yt_colormaps', 'notebook_sphinxext']
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']


https://bitbucket.org/yt_analysis/yt-doc/commits/882e0b48dd2e/
Changeset:   882e0b48dd2e
User:        ngoldbaum
Date:        2013-10-28 04:52:40
Summary:     Adding bootcamps to the docs proper.
Affected #:  14 files

diff -r 5f19e1124bbf7de8d9c1fbe170005384a3923077 -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 source/bootcamp/Data_Inspection.ipynb
--- /dev/null
+++ b/source/bootcamp/Data_Inspection.ipynb
@@ -0,0 +1,396 @@
+{
+ "metadata": {
+  "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Starting Out and Loading Data\n",
+      "\n",
+      "We're going to get started by loading up yt.  This next command brings all of the libraries into memory and sets up our environment.  Note that in most scripts, you will want to import from ``yt.mods`` rather than ``yt.imods``.  But using ``yt.imods`` gets you some nice stuff for the IPython notebook, which we'll use below."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.imods import *"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now that we've loaded yt, we can load up some data.  Let's load the `IsolatedGalaxy` dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf = load(os.environ[\"YT_DATA_DIR\"]+\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Fields and Facts\n",
+      "\n",
+      "When you call the `load` function, yt tries to do very little -- this is designed to be a fast operation, just setting up some information about the simulation.  Now, the first time you access the \"hierarchy\" (shorthand is `.h`) it will read and load the mesh and then determine where data is placed in the physical domain and on disk.  Once it knows that, yt can tell you some statistics about the simulation:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf.h.print_stats()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt can also tell you the fields it found on disk:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf.h.field_list"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "And, all of the fields it thinks it knows how to generate:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf.h.derived_field_list"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt can also transparently generate fields.  However, we encourage you to examine exactly what yt is doing when it generates those fields.  To see, you can ask for the source of a given field."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print pf.field_info[\"VorticityX\"].get_source()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt stores information about the domain of the simulation:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print pf.domain_width"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt can also convert this into various units:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print pf.domain_width * pf[\"kpc\"]\n",
+      "print pf.domain_width * pf[\"au\"]\n",
+      "print pf.domain_width * pf[\"miles\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Mesh Structure\n",
+      "\n",
+      "If you're using a simulation type that has grids (for instance, here we're using an Enzo simulation) you can examine the structure of the mesh.  For the most part, you probably won't have to use this unless you're debugging a simulation or examining in detail what is going on."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print pf.h.grid_left_edge"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "But, you may have to access information about individual grid objects!  Each grid object mediates accessing data from the disk and has a number of attributes that tell you about it.  The hierarchy (`pf.h` here) has an attribute `grids` which is all of the grid objects."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print pf.h.grids[0]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "g = pf.h.grids[0]\n",
+      "print g"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Grids have dimensions, extents, level, and even a list of Child grids."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "g.ActiveDimensions"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "g.LeftEdge, g.RightEdge"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "g.Level"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "g.Children"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Advanced Grid Inspection\n",
+      "\n",
+      "If we want to examine grids only at a given level, we can!  Not only that, but we can load data and take a look at various fields.\n",
+      "\n",
+      "*This section can be skipped!*"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "gs = pf.h.select_grids(pf.h.max_level)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "g2 = gs[0]\n",
+      "print g2\n",
+      "print g2.Parent\n",
+      "print g2.get_global_startindex()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print g2[\"Density\"][:,:,0]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print (g2.Parent.child_mask == 0).sum() * 8\n",
+      "print g2.ActiveDimensions.prod()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "for f in pf.h.field_list:\n",
+      "    fv = g[f]\n",
+      "    if fv.size == 0: continue\n",
+      "    print f, fv.min(), fv.max()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "for f in sorted(pf.h.field_list):\n",
+      "    fv = g[f]\n",
+      "    if fv.size == 0: continue\n",
+      "    print f, fv.min(), fv.max()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Examining Data in Regions\n",
+      "\n",
+      "yt provides data object selectors.  In subsequent notebooks we'll examine these in more detail, but we can select a sphere of data and perform a number of operations on it.  yt makes it easy to operate on fluid fields in an object in *bulk*, but you can also examine individual field values.\n",
+      "\n",
+      "This creates a sphere selector positioned at the most dense point in the simulation that has a radius of 10 kpc."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "sp = pf.h.sphere(\"max\", (10, 'kpc'))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print sp"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can calculate a bunch of bulk quantities.  Here's that list, but there's a list in the docs, too!"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print sp.quantities.keys()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Let's look at the total mass.  This is how you call a given quantity.  yt calls these \"Derived Quantities\".  We'll talk about a few in a later notebook."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print sp.quantities[\"TotalMass\"]()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 5f19e1124bbf7de8d9c1fbe170005384a3923077 -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 source/bootcamp/Data_Objects_and_Time_Series.ipynb
--- /dev/null
+++ b/source/bootcamp/Data_Objects_and_Time_Series.ipynb
@@ -0,0 +1,361 @@
+{
+ "metadata": {
+  "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Data Objects and Time Series Data\n",
+      "\n",
+      "Just like before, we will load up yt."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.imods import *"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Time Series Data\n",
+      "\n",
+      "Unlike before, instead of loading a single dataset, this time we'll load a bunch which we'll examine in sequence.  This command creates a `TimeSeriesData` object, which can be iterated over (including in parallel, which is outside the scope of this bootcamp) and analyzed.  There are some other helpful operations it can provide, but we'll stick to the basics here.\n",
+      "\n",
+      "Note that you can specify either a list of filenames, or a glob (i.e., asterisk) pattern in this."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ts = TimeSeriesData.from_filenames(os.environ[\"YT_DATA_DIR\"]+\"enzo_tiny_cosmology/*/*.hierarchy\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "### Example 1: Simple Time Series\n",
+      "\n",
+      "As a simple example of how we can use this functionality, let's find the min and max of the density as a function of time in this simulation.  To do this we use the construction `for pf in ts` where `pf` means \"Parameter File\" and `ts` is the \"Time Series\" we just loaded up.  For each parameter file, we'll create an object (`dd`) that covers the entire domain.  (`all_data` is a shorthand function for this.)  We'll then call the Derived Quantity `Extrema`, and append the min and max to our extrema outputs."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "rho_ex = []\n",
+      "times = []\n",
+      "for pf in ts:\n",
+      "    dd = pf.h.all_data()\n",
+      "    rho_ex.append(dd.quantities[\"Extrema\"](\"Density\")[0])\n",
+      "    times.append(pf.current_time * pf[\"years\"])\n",
+      "rho_ex = np.array(rho_ex)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now we plot the minimum and the maximum:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pylab.semilogy(times, rho_ex[:,0], '-xk')\n",
+      "pylab.semilogy(times, rho_ex[:,1], '-xr')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "### Example 2: Advanced Time Series\n",
+      "\n",
+      "Let's do something a bit different.  Let's calculate the total mass inside halos and outside halos.\n",
+      "\n",
+      "This actually touches a lot of different pieces of machinery in yt.  For every parameter file, we will run the halo finder HOP.  Then, we calculate the total mass in the domain.  Then, for each halo, we calculate the sum of the baryon mass in that halo.  We'll keep running tallies of these two things."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "mass = []\n",
+      "zs = []\n",
+      "for pf in ts:\n",
+      "    halos = HaloFinder(pf)\n",
+      "    dd = pf.h.all_data()\n",
+      "    total_mass = dd.quantities[\"TotalQuantity\"](\"CellMassMsun\")[0]\n",
+      "    total_in_baryons = 0.0\n",
+      "    for halo in halos:\n",
+      "        sp = halo.get_sphere()\n",
+      "        total_in_baryons += sp.quantities[\"TotalQuantity\"](\"CellMassMsun\")[0]\n",
+      "    mass.append(total_in_baryons/total_mass)\n",
+      "    zs.append(pf.current_redshift)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now let's plot them!"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pylab.loglog(zs, mass, '-xb')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Data Objects\n",
+      "\n",
+      "Time series data have many applications, but most of them rely on examining the underlying data in some way.  Below, we'll see how to use and manipulate data objects.\n",
+      "\n",
+      "### Ray Queries\n",
+      "\n",
+      "yt provides the ability to examine rays, or lines, through the domain.  Note that these are not periodic, unlike most other data objects.  We create a ray object and can then examine quantities of it.  Rays have the special fields `t` and `dts`, which correspond to the time the ray enters a given cell and the distance it travels through that cell.\n",
+      "\n",
+      "To create a ray, we specify the start and end points."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ray = pf.h.ray([0.1, 0.2, 0.3], [0.9, 0.8, 0.7])\n",
+      "pylab.semilogy(ray[\"t\"], ray[\"Density\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print ray[\"dts\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print ray[\"t\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print ray[\"x\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "### Slice Queries\n",
+      "\n",
+      "While slices are often used for visualization, they can be useful for other operations as well.  yt regards slices as multi-resolution objects.  They are an array of cells that are not all the same size; it only returns the cells at the highest resolution that it intersects.  (This is true for all yt data objects.)  Slices and projections have the special fields `px`, `py`, `pdx` and `pdy`, which correspond to the coordinates and half-widths in the pixel plane."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf = load(os.environ[\"YT_DATA_DIR\"]+\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "v, c = pf.h.find_max(\"Density\")\n",
+      "sl = pf.h.slice(0, c[0])\n",
+      "print sl[\"x\"], sl[\"z\"], sl[\"pdx\"]\n",
+      "print sl[\"Density\"].shape"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If we want to do something interesting with a Slice, we can turn it into a `FixedResolutionBuffer`.  This object can be queried and will return a 2D array of values."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "frb = sl.to_frb((50.0, 'kpc'), 1024)\n",
+      "print frb[\"Density\"].shape"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt provides a few functions for writing arrays to disk, particularly in image form.  Here we'll write out the log of Density, and then use IPython to display it back here.  Note that for the most part, you will probably want to use a `PlotWindow` for this, but in the case that it is useful you can directly manipulate the data."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "write_image(np.log10(frb[\"Density\"]), \"temp.png\")\n",
+      "from IPython.core.display import Image\n",
+      "Image(filename = \"temp.png\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "### Off-Axis Slices\n",
+      "\n",
+      "yt provides not only slices, but off-axis slices that are sometimes called \"cutting planes.\"  These are specified by (in order) a normal vector and a center.  Here we've set the normal vector to `[0.2, 0.3, 0.5]` and the center to be the point of maximum density.\n",
+      "\n",
+      "We can then turn these directly into plot windows using `to_pw`.  Note that the `to_pw` and `to_frb` methods are available on slices, off-axis slices, and projections, and can be used on any of them."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cp = pf.h.cutting([0.2, 0.3, 0.5], \"max\")\n",
+      "pw = cp.to_pw(fields = [\"Density\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Once we have our plot window from our cutting plane, we can show it here."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pw.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can, as noted above, do the same with our slice:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pws = sl.to_pw(fields=[\"Density\"])\n",
+      "pws.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "### Covering Grids\n",
+      "\n",
+      "If we want to access a 3D array of data that spans multiple resolutions in our simulation, we can use a covering grid.  This will return a 3D array of data, drawing from up to the resolution level specified when creating the data.  For example, if you create a covering grid that spans two child grids of a single parent grid, it will fill those zones covered by a zone of a child grid with the data from that child grid.  Where it is covered only by the parent grid, the cells from the parent grid will be duplicated (appropriately) to fill the covering grid.\n",
+      "\n",
+      "There are two different types of covering grids: unsmoothed and smoothed.  Smoothed grids will be filled through a cascading interpolation process; they will be filled at level 0, interpolated to level 1, filled at level 1, interpolated to level 2, filled at level 2, etc.  This will help to reduce edge effects.  Unsmoothed covering grids will not be interpolated, but rather values will be duplicated multiple times.\n",
+      "\n",
+      "Here we create an unsmoothed covering grid at level 2, with the left edge at `[0.0, 0.0, 0.0]` and with dimensions equal to those that would cover the entire domain at level 2.  We can then ask for the Density field, which will be a 3D array."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cg = pf.h.covering_grid(2, [0.0, 0.0, 0.0], pf.domain_dimensions * 2**2)\n",
+      "print cg[\"Density\"].shape"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "In this example, we do exactly the same thing: except we ask for a *smoothed* covering grid, which will reduce edge effects."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "scg = pf.h.smoothed_covering_grid(2, [0.0, 0.0, 0.0], pf.domain_dimensions * 2**2)\n",
+      "print scg[\"Density\"].shape"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 5f19e1124bbf7de8d9c1fbe170005384a3923077 -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 source/bootcamp/Derived_Fields_and_Profiles.ipynb
--- /dev/null
+++ b/source/bootcamp/Derived_Fields_and_Profiles.ipynb
@@ -0,0 +1,316 @@
+{
+ "metadata": {
+  "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Derived Fields and Profiles\n",
+      "\n",
+      "One of the most powerful features in yt is the ability to create derived fields that act and look exactly like fields that exist on disk.  This means that they will be generated on demand and can be used anywhere a field that exists on disk would be used.  Additionally, you can create them by just writing python functions."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.imods import *"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [],
+     "prompt_number": 1
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Derived Fields\n",
+      "\n",
+      "This is an example of the simplest possible way to create a derived field.  All derived fields are defined by a function and some metadata; that metadata can include units, LaTeX-friendly names, conversion factors, and so on.  Fields can be defined in the way in the next cell.  What this does is create a function which accepts two arguments and then provide the units for that field.  In this case, our field is `Dinosaurs` and our units are `Trex/s`.  The function itself can access any fields that are in the simulation, and it does so by requesting data from the object called `data`."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "@derived_field(units = \"Trex/s\")\n",
+      "def Dinosaurs(field, data):\n",
+      "    return data[\"Density\"]**(2.0/3.0) * data[\"VelocityMagnitude\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [],
+     "prompt_number": 2
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "One important thing to note is that derived fields must be defined *before* any datasets are loaded.  Let's load up our data and take a look at some quantities."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf = load(os.environ[\"YT_DATA_DIR\"]+\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "dd = pf.h.all_data()\n",
+      "print dd.quantities.keys()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "['MinLocation', 'StarAngularMomentumVector', 'WeightedVariance', 'TotalMass', 'AngularMomentumVector', 'TotalQuantity', 'IsBound', 'WeightedAverageQuantity', 'CenterOfMass', 'BulkVelocity', 'ParticleSpinParameter', 'Action', 'Extrema', 'MaxLocation', 'BaryonSpinParameter']\n"
+       ]
+      }
+     ],
+     "prompt_number": 4
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "One interesting question is, what are the minimum and maximum values of dinosaur production rates in our isolated galaxy?  We can do that by examining the `Extrema` quantity -- the exact same way that we would for Density, Temperature, and so on."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dd.quantities[\"Extrema\"](\"Dinosaurs\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "[(2.2146366774504352e-20, 9.1573883828992124e-09)]\n"
+       ]
+      }
+     ],
+     "prompt_number": 5
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can do the same for the average quantities as well."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dd.quantities[\"WeightedAverageQuantity\"](\"Dinosaurs\", weight=\"Temperature\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## A Few Other Quantities\n",
+      "\n",
+      "We can ask other quantities of our data, as well.  For instance, this sequence of operations will find the most dense point, center a sphere on it, calculate the bulk velocity of that sphere, calculate the baryonic angular momentum vector, and then the density extrema.  All of this is done in a memory conservative way: if you have an absolutely enormous dataset, yt will split that dataset into pieces, apply intermediate reductions and then a final reduction to calculate your quantity."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "sp = pf.h.sphere(\"max\", (10.0, 'kpc'))\n",
+      "bv = sp.quantities[\"BulkVelocity\"]()\n",
+      "L = sp.quantities[\"AngularMomentumVector\"]()\n",
+      "(rho_min, rho_max), = sp.quantities[\"Extrema\"](\"Density\")\n",
+      "print bv, L, rho_min, rho_max"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Profiles\n",
+      "\n",
+      "yt provides the ability to bin in 1, 2 and 3 dimensions.  This means discretizing in one or more dimensions of phase space (density, temperature, etc) and then calculating either the total value of a field in each bin or the average value of a field in each bin.\n",
+      "\n",
+      "We do this using the objects `BinnedProfile1D`, `BinnedProfile2D`, and `BinnedProfile3D`.  The first two are the most common since they are the easiest to visualize.\n",
+      "\n",
+      "This first set of commands manually creates a `BinnedProfile1D` from the sphere we created earlier, binned in 32 bins according to density between `rho_min` and `rho_max`, and then takes the Density-weighted average of the fields `Temperature` and (previously-defined) `Dinosaurs`.  We then plot it in a loglog plot."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prof = BinnedProfile1D(sp, 32, \"Density\", rho_min, rho_max)\n",
+      "prof.add_fields([\"Temperature\", \"Dinosaurs\"], weight=\"Density\")\n",
+      "pylab.loglog(prof[\"Density\"], prof[\"Temperature\"], \"-x\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now we plot the `Dinosaurs` field."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pylab.loglog(prof[\"Density\"], prof[\"Dinosaurs\"], '-x')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If we want to see the total mass in every bin, we add the `CellMassMsun` field with no weight.  Specifying `weight=None` will simply take the total value in every bin and add that up."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prof.add_fields([\"CellMassMsun\"], weight=None)\n",
+      "pylab.loglog(prof[\"Density\"], prof[\"CellMassMsun\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also specify accumulation, which sums all the bins, from left to right.  Note that for 2D and 3D profiles, this needs to be a tuple of length 2 or 3."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prof.add_fields([\"CellMassMsun\"], weight=None, accumulation=True)\n",
+      "pylab.loglog(prof[\"Density\"], prof[\"CellMassMsun\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Advanced Derived Fields\n",
+      "\n",
+      "*This section can be skipped!*\n",
+      "\n",
+      "You can also define fields that require extra zones.  This is useful, for instance, if you want to take the average, or apply a stencil.  yt provides fields like `DivV` that do this internally.  This example is a very busy example of how to do it.  You need to specify the validator `ValidateSpatial` with the number of extra zones *on each side* of the grid that you need, and then inside your function you need to return a field *with those zones stripped off*.  So by necessity, the arrays returned by `data[something]` will have larger spatial extent than what should be returned by the function itself.  If you specify that you need 0 extra zones, this will also work and will simply supply a `grid` object for the field."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "@derived_field(name = \"AveragedTemperature\",\n",
+      "               validators = [ValidateSpatial(1)],\n",
+      "               units = r\"K\")\n",
+      "def _AveragedTemperature(field, data):\n",
+      "    nx, ny, nz = data[\"Temperature\"].shape\n",
+      "    new_field = na.zeros((nx-2,ny-2,nz-2), dtype='float64')\n",
+      "    weight_field = na.zeros((nx-2,ny-2,nz-2), dtype='float64')\n",
+      "    i_i, j_i, k_i = na.mgrid[0:3,0:3,0:3]\n",
+      "    for i,j,k in zip(i_i.ravel(),j_i.ravel(),k_i.ravel()):\n",
+      "        sl = [slice(i,nx-(2-i)),slice(j,ny-(2-j)),slice(k,nz-(2-k))]\n",
+      "        new_field += data[\"Temperature\"][sl] * data[\"CellMass\"][sl]\n",
+      "        weight_field += data[\"CellMass\"][sl]\n",
+      "    # Now some fancy footwork\n",
+      "    new_field2 = na.zeros((nx,ny,nz))\n",
+      "    new_field2[1:-1,1:-1,1:-1] = new_field/weight_field\n",
+      "    return new_field2"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now, once again, we can access `AveragedTemperature` just like any other field.  Note that because it requires ghost zones, this will be a much slower process!"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf = load(os.environ[\"YT_DATA_DIR\"]+\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "dd = pf.h.all_data()\n",
+      "(tmin, tmax), (atmin, atmax) = dd.quantities[\"Extrema\"]([\"Temperature\", \"AveragedTemperature\"])\n",
+      "print tmin, tmax, atmin, atmax\n",
+      "print tmin / atmin, tmax / atmax"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Field Parameters\n",
+      "\n",
+      "Field parameters are a method of passing information to derived fields.  For instance, you might pass in information about a vector you want to use as a basis for a coordinate transformation.  yt often uses things like `bulk_velocity` to identify velocities that should be subtracted off.  Here we show how that works:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "sp_small = pf.h.sphere(\"max\", (1.0, 'kpc'))\n",
+      "bv = sp_small.quantities[\"BulkVelocity\"]()\n",
+      "\n",
+      "sp = pf.h.sphere(\"max\", (0.1, 'mpc'))\n",
+      "rv1 = sp.quantities[\"Extrema\"](\"RadialVelocity\")\n",
+      "\n",
+      "sp.clear_data()\n",
+      "sp.set_field_parameter(\"bulk_velocity\", bv)\n",
+      "rv2 = sp.quantities[\"Extrema\"](\"RadialVelocity\")\n",
+      "\n",
+      "print bv\n",
+      "print rv1\n",
+      "print rv2"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 5f19e1124bbf7de8d9c1fbe170005384a3923077 -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 source/bootcamp/Introduction.ipynb
--- /dev/null
+++ b/source/bootcamp/Introduction.ipynb
@@ -0,0 +1,76 @@
+{
+ "metadata": {
+  "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Welcome to the yt bootcamp!\n",
+      "\n",
+      "In this brief tutorial, we'll go over how to load up data, analyze things, inspect your data, and make some visualizations.\n",
+      "\n",
+      "But, before we begin, there are a few places to go if you run into trouble.\n",
+      "\n",
+      "**The yt homepage is at http://yt-project.org/**\n",
+      "\n",
+      "## Sources of Help\n",
+      "\n",
+      "There are three places to check for help:\n",
+      "\n",
+      " * The documentation: http://yt-project.org/doc/\n",
+      " * The IRC Channel (`#yt` on `chat.freenode.net`, also at http://yt-project.org/irc.html)\n",
+      " * The `yt-users` mailing list, at http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org\n",
+      "\n",
+      "## Sources of Information\n",
+      "\n",
+      "The first place to go for information about any kind of development is BitBucket at https://bitbucket.org/yt_analysis/yt/ , which contains a bug tracker, the source code, and links to other useful places.\n",
+      "\n",
+      "You can find recipes in the documentation ( http://yt-project.org/doc/ ) under the \"Cookbook\" section.\n",
+      "\n",
+      "There is a portal with access to data and IPython notebooks at http://hub.yt-project.org/ .\n",
+      "\n",
+      "## How to Update yt\n",
+      "\n",
+      "If you ever run into a situation where you need to update your yt installation, simply type this on the command line:\n",
+      "\n",
+      "`yt update`\n",
+      "\n",
+      "This will automatically update it for you.\n",
+      "\n",
+      "## Acquiring the datasets for this tutorial\n",
+      "\n",
+      "To access the datasets that are used in these bootcamp tutorials, you can either download them manually at http://yt-project.org/data/, or run this next cell by pressing `Shift-Enter` inside it.  It may take a few minutes.\n",
+      "\n",
+      "## What's Next?\n",
+      "\n",
+      "The Notebooks are meant to be explored in this order:\n",
+      "\n",
+      "1. Introduction\n",
+      "2. Data Inspection\n",
+      "3. Simple Visualization\n",
+      "4. Data Objects and Time Series\n",
+      "5. Derived Fields and Profiles\n",
+      "6. Volume Rendering"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "!export YT_DATA_DIR=$HOME/Documents/test/"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 5f19e1124bbf7de8d9c1fbe170005384a3923077 -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 source/bootcamp/Simple_Visualization.ipynb
--- /dev/null
+++ b/source/bootcamp/Simple_Visualization.ipynb
@@ -0,0 +1,274 @@
+{
+ "metadata": {
+  "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Simple Visualizations of Data\n",
+      "\n",
+      "Just like in our first notebook, we have to load yt and then some data."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.imods import *"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "For this notebook, we'll load up a cosmology dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf = load(os.environ[\"YT_DATA_DIR\"]+\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "print \"Redshift =\", pf.current_redshift"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "In the terms that yt uses, a projection is a line integral through the domain.  This can either be unweighted (in which case a column density is returned) or weighted, in which case an average value is returned.  Projections are, like all other data objects in yt, full-fledged data objects that churn through data and present that to you.  However, we also provide a simple method of creating Projections and plotting them in a single step.  This is called a Plot Window, here specifically known as a `ProjectionPlot`.  One thing to note is that in yt, we project all the way through the entire domain at a single time.  This means that the first call to projecting can be somewhat time consuming, but panning, zooming and plotting are all quite fast.\n",
+      "\n",
+      "yt is designed to make it easy to make nice plots and straightforward to modify those plots directly.  The cookbook in the documentation includes detailed examples of this."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "p = ProjectionPlot(pf, \"y\", \"Density\")\n",
+      "p.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `show` command simply sends the plot to the IPython notebook.  You can also call `p.save()` which will save the plot to the file system.  This function accepts an argument, which will be prepended to the filename and can be used to name it based on the width or to supply a location.\n",
+      "\n",
+      "Now we'll zoom and pan a bit."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "p.zoom(2.0)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "p.pan_rel((0.1, 0.0))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "p.zoom(10.0)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "p.pan_rel((-0.25, -0.5))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "p.zoom(0.1)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If we specify multiple fields, each time we call `show` we get multiple plots back.  Same for `save`!"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "p = ProjectionPlot(pf, \"z\", [\"Density\", \"Temperature\"], weight_field=\"Density\")\n",
+      "p.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can adjust the colormap on a field-by-field basis."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "p.set_cmap(\"Temperature\", \"hot\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "And, we can re-center the plot on different locations.  One possible use of this would be to make a single `ProjectionPlot` which you move around to look at different regions in your simulation, saving at each one."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "v, c = pf.h.find_max(\"Density\")\n",
+      "p.set_center((c[0], c[1]))\n",
+      "p.zoom(10)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Okay, let's load up a bigger simulation (from `Enzo_64` this time) and make a slice plot."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf = load(\"Enzo_64/DD0043/data0043\")\n",
+      "s = SlicePlot(pf, \"z\", [\"Density\", \"VelocityMagnitude\"], center=\"max\")\n",
+      "s.set_cmap(\"VelocityMagnitude\", \"kamae\")\n",
+      "s.zoom(10.0)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can adjust the logging of various fields:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s.set_log(\"VelocityMagnitude\", True)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt provides many different annotations for your plots.  You can see all of these in the documentation, or if you type `s.annotate_` and press tab, a list will show up here.  We'll annotate with velocity arrows."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s.annotate_velocity()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Contours can also be overlaid:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s = SlicePlot(pf, \"x\", [\"Density\"], center=\"max\")\n",
+      "s.annotate_contour(\"Temperature\")\n",
+      "s.zoom(2.5)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, we can save out to the file system."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s.save()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 5f19e1124bbf7de8d9c1fbe170005384a3923077 -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 source/bootcamp/Volume_Rendering.ipynb
--- /dev/null
+++ b/source/bootcamp/Volume_Rendering.ipynb
@@ -0,0 +1,95 @@
+{
+ "metadata": {
+  "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# A Brief Demo of Volume Rendering\n",
+      "\n",
+      "This shows a small amount of volume rendering.  Really, just enough to get your feet wet!"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.imods import *\n",
+      "pf = load(os.environ[\"YT_DATA_DIR\"]+\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To create a volume rendering, we need a camera and a transfer function.  We'll use the `ColorTransferFunction`, which accepts (in log space) the minimum and maximum bounds of our transfer function.  This means behavior for data outside these values is undefined.\n",
+      "\n",
+      "We then add on \"layers\" like an onion.  This function can accept a width (here specified) in data units, and also a color map.  Here we add on four layers.\n",
+      "\n",
+      "Finally, we create a camera.  The focal point is `[0.5, 0.5, 0.5]`, the width is 20 kpc (including front-to-back integration) and we specify a transfer function.  Once we've done that, we call `show` to actually cast our rays and display them inline."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "tf = ColorTransferFunction((-28, -24))\n",
+      "tf.add_layers(4, w=0.01)\n",
+      "cam = pf.h.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], 20.0/pf['kpc'], 512, tf)\n",
+      "cam.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If we want to apply a clipping, we can specify the `clip_ratio`.  This will clip the upper bounds to this value times the `std()` of the image array."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cam.show(clip_ratio=4)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "There are several other options we can specify.  Note that here we have turned on the use of ghost zones, shortened the data interval for the transfer function, and widened our gaussian layers."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "tf = ColorTransferFunction((-28, -25))\n",
+      "tf.add_layers(4, w=0.03)\n",
+      "cam = pf.h.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], 20.0/pf['kpc'], 512, tf, no_ghost=False)\n",
+      "cam.show(clip_ratio=4.0)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 5f19e1124bbf7de8d9c1fbe170005384a3923077 -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 source/bootcamp/data_inspection.rst
--- /dev/null
+++ b/source/bootcamp/data_inspection.rst
@@ -0,0 +1,4 @@
+Data Inspection
+---------------
+
+.. notebook:: Data_Inspection.ipynb

diff -r 5f19e1124bbf7de8d9c1fbe170005384a3923077 -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 source/bootcamp/data_objects_and_time_series.rst
--- /dev/null
+++ b/source/bootcamp/data_objects_and_time_series.rst
@@ -0,0 +1,4 @@
+Data Objects and Time Series
+----------------------------
+
+.. notebook:: Data_Objects_and_Time_Series.ipynb

diff -r 5f19e1124bbf7de8d9c1fbe170005384a3923077 -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 source/bootcamp/derived_fields_and_profiles.rst
--- /dev/null
+++ b/source/bootcamp/derived_fields_and_profiles.rst
@@ -0,0 +1,4 @@
+Derived Fields and Profiles
+---------------------------
+
+.. notebook:: Derived_Fields_and_Profiles.ipynb

diff -r 5f19e1124bbf7de8d9c1fbe170005384a3923077 -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 source/bootcamp/index.rst
--- /dev/null
+++ b/source/bootcamp/index.rst
@@ -0,0 +1,49 @@
+yt Bootcamp
+===========
+
+We have been developing a sequence of materials that can be run in the IPython
+notebook that walk through how to look at data and how to operate on data.
+These are not meant to be detailed walkthroughs, but simply short
+introductions.  Their purpose is to let you explore, interactively, some common
+operations that can be done on data with yt!
+
+To get started with the bootcamp, you need to download the repository and start
+the IPython notebook.  The easiest way, if you have mercurial installed, to get
+the repository is to:
+
+.. code-block:: bash
+
+   hg clone https://bitbucket.org/yt_analysis/yt-doc
+
+If you don't, you can download it from `here
+<https://bitbucket.org/yt_analysis/yt-doc/get/tip.tar.bz2>`_.
+
+Now you can start the IPython notebook and begin:
+
+.. code-block:: bash
+
+   cd yt-doc/source/bootcamp
+   yt notebook
+
+This command will give you information about the Notebook Server and how to
+access it.  Once you have done so, choose "Introduction" from the list of
+notebooks, which includes an introduction and information about how to download
+the sample data.
+
+.. warning:: The pre-filled out notebooks are *far* less fun than running them
+             yourselves!  Check out the repo and give it a try.
+
+Here are the notebooks, which have been filled in for inspection:
+
+.. toctree::
+   :maxdepth: 1
+
+   introduction
+   data_inspection
+   simple_visualization
+   data_objects_and_time_series
+   derived_fields_and_profiles
+   volume_rendering
+
+Let us know if you would like to contribute other example notebooks, or have
+any suggestions for how these can be improved.

diff -r 5f19e1124bbf7de8d9c1fbe170005384a3923077 -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 source/bootcamp/introduction.rst
--- /dev/null
+++ b/source/bootcamp/introduction.rst
@@ -0,0 +1,4 @@
+Introduction
+------------
+
+.. notebook:: Introduction.ipynb

diff -r 5f19e1124bbf7de8d9c1fbe170005384a3923077 -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 source/bootcamp/simple_visualization.rst
--- /dev/null
+++ b/source/bootcamp/simple_visualization.rst
@@ -0,0 +1,4 @@
+Simple Visualization
+--------------------
+
+.. notebook:: Simple_Visualization.ipynb

diff -r 5f19e1124bbf7de8d9c1fbe170005384a3923077 -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 source/bootcamp/volume_rendering.rst
--- /dev/null
+++ b/source/bootcamp/volume_rendering.rst
@@ -0,0 +1,4 @@
+Volume Rendering
+----------------
+
+.. notebook:: Volume_Rendering.ipynb

diff -r 5f19e1124bbf7de8d9c1fbe170005384a3923077 -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 source/index.rst
--- a/source/index.rst
+++ b/source/index.rst
@@ -113,7 +113,7 @@
 
    welcome/index
    orientation/index
-   bootcamp
+   bootcamp/index
    workshop
    help/index
    interacting/index


https://bitbucket.org/yt_analysis/yt-doc/commits/204c648d7b9b/
Changeset:   204c648d7b9b
User:        ngoldbaum
Date:        2013-10-28 07:32:44
Summary:     Use the test_data_dir to simplify data naming in the bootcamp.
Affected #:  6 files

diff -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 -r 204c648d7b9ba893e53b51ea5fcf8af38b48156c source/bootcamp/Data_Inspection.ipynb
--- a/source/bootcamp/Data_Inspection.ipynb
+++ b/source/bootcamp/Data_Inspection.ipynb
@@ -37,7 +37,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(os.environ[\"YT_DATA_DIR\"]+\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
+      "pf = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
      ],
      "language": "python",
      "metadata": {},

diff -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 -r 204c648d7b9ba893e53b51ea5fcf8af38b48156c source/bootcamp/Data_Objects_and_Time_Series.ipynb
--- a/source/bootcamp/Data_Objects_and_Time_Series.ipynb
+++ b/source/bootcamp/Data_Objects_and_Time_Series.ipynb
@@ -41,7 +41,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ts = TimeSeriesData.from_filenames(os.environ[\"YT_DATA_DIR\"]+\"enzo_tiny_cosmology/*/*.hierarchy\")"
+      "ts = TimeSeriesData.from_filenames(\"enzo_tiny_cosmology/*/*.hierarchy\")"
      ],
      "language": "python",
      "metadata": {},
@@ -208,7 +208,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(os.environ[\"YT_DATA_DIR\"]+\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "pf = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
       "v, c = pf.h.find_max(\"Density\")\n",
       "sl = pf.h.slice(0, c[0])\n",
       "print sl[\"x\"], sl[\"z\"], sl[\"pdx\"]\n",

diff -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 -r 204c648d7b9ba893e53b51ea5fcf8af38b48156c source/bootcamp/Derived_Fields_and_Profiles.ipynb
--- a/source/bootcamp/Derived_Fields_and_Profiles.ipynb
+++ b/source/bootcamp/Derived_Fields_and_Profiles.ipynb
@@ -60,7 +60,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(os.environ[\"YT_DATA_DIR\"]+\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "pf = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
       "dd = pf.h.all_data()\n",
       "print dd.quantities.keys()"
      ],
@@ -268,7 +268,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(os.environ[\"YT_DATA_DIR\"]+\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "pf = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
       "dd = pf.h.all_data()\n",
       "(tmin, tmax), (atmin, atmax) = dd.quantities[\"Extrema\"]([\"Temperature\", \"AveragedTemperature\"])\n",
       "print tmin, tmax, atmin, atmax\n",

diff -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 -r 204c648d7b9ba893e53b51ea5fcf8af38b48156c source/bootcamp/Introduction.ipynb
--- a/source/bootcamp/Introduction.ipynb
+++ b/source/bootcamp/Introduction.ipynb
@@ -45,7 +45,7 @@
       "\n",
       "## Acquiring the datasets for this tutorial\n",
       "\n",
-      "To access the datasets that are used in these bootcamp tutorials, you can either download them manually at http://yt-project.org/data/, or run this next cell by pressing `Shift-Enter` inside it.  It may take a few minutes.\n",
+      "To access the datasets that are used in these bootcamp tutorials, you can either download them manually at http://yt-project.org/data/.\n",
       "\n",
       "## What's Next?\n",
       "\n",
@@ -58,16 +58,6 @@
       "5. Derived Fields and Profiles\n",
       "6. Volume Rendering"
      ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "!export YT_DATA_DIR=$HOME/Documents/test/"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
     }
    ],
    "metadata": {}

diff -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 -r 204c648d7b9ba893e53b51ea5fcf8af38b48156c source/bootcamp/Simple_Visualization.ipynb
--- a/source/bootcamp/Simple_Visualization.ipynb
+++ b/source/bootcamp/Simple_Visualization.ipynb
@@ -37,7 +37,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(os.environ[\"YT_DATA_DIR\"]+\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "pf = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
       "print \"Redshift =\", pf.current_redshift"
      ],
      "language": "python",

diff -r 882e0b48dd2e237e96c38ea7bc51f5704e1d7ad8 -r 204c648d7b9ba893e53b51ea5fcf8af38b48156c source/bootcamp/Volume_Rendering.ipynb
--- a/source/bootcamp/Volume_Rendering.ipynb
+++ b/source/bootcamp/Volume_Rendering.ipynb
@@ -21,7 +21,7 @@
      "collapsed": false,
      "input": [
       "from yt.imods import *\n",
-      "pf = load(os.environ[\"YT_DATA_DIR\"]+\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
+      "pf = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
      ],
      "language": "python",
      "metadata": {},


https://bitbucket.org/yt_analysis/yt-doc/commits/4dfdb45a4fc4/
Changeset:   4dfdb45a4fc4
User:        ngoldbaum
Date:        2013-10-28 20:26:03
Summary:     Do a better job of cleaning up; write to build directory instead of source.
Affected #:  2 files

diff -r 204c648d7b9ba893e53b51ea5fcf8af38b48156c -r 4dfdb45a4fc4279dd1e7bd5a86d44e41416f5efe extensions/notebook_sphinxext.py
--- a/extensions/notebook_sphinxext.py
+++ b/extensions/notebook_sphinxext.py
@@ -1,4 +1,4 @@
-import os, shutil, string
+import os, shutil, string, glob
 from sphinx.util.compat import Directive
 from docutils import nodes
 from docutils.parsers.rst import directives
@@ -29,24 +29,22 @@
         nb_abs_path = os.path.join(rst_dir, nb_basename)
 
         # Move files around.
-        dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
-                                                os.path.dirname(nb_abs_path)))
+        rel_dir = os.path.relpath(rst_dir, setup.confdir)
+        dest_dir = os.path.join(setup.app.builder.outdir, rel_dir)
+        dest_path = os.path.join(dest_dir, nb_basename)
+
         if not os.path.exists(dest_dir):
             os.makedirs(dest_dir)
 
-        rel_dir = os.path.relpath(rst_dir, setup.confdir)
-        place = os.path.join(dest_dir, rel_dir)
-        if not os.path.isdir(place): os.makedirs(place)
-        dest_path = os.path.join(place, nb_basename)
-        dest_path_eval = string.replace(dest_path, '.ipynb', '_evaluated.ipynb')
-        dest_path_script = string.replace(dest_path, '.ipynb', '.py')
-
         # Copy unevaluated script
         try:
             shutil.copyfile(nb_abs_path, dest_path)
         except IOError:
             raise RuntimeError("Unable to copy notebook to build destination.")
 
+        dest_path_eval = string.replace(dest_path, '.ipynb', '_evaluated.ipynb')
+        dest_path_script = string.replace(dest_path, '.ipynb', '.py')
+
+        # Create python script version
         unevaluated_text = nb_to_html(nb_abs_path)
         script_text = nb_to_python(nb_abs_path)
@@ -80,6 +78,11 @@
         # add dependency
         self.state.document.settings.record_dependencies.add(nb_abs_path)
 
+        # clean up png files left behind by notebooks.
+        png_files = glob.glob("*.png")
+        for file in png_files:
+            os.remove(file)
+
         return [nb_node]
 
 class notebook_node(nodes.raw):

diff -r 204c648d7b9ba893e53b51ea5fcf8af38b48156c -r 4dfdb45a4fc4279dd1e7bd5a86d44e41416f5efe source/conf.py
--- a/source/conf.py
+++ b/source/conf.py
@@ -243,5 +243,5 @@
                        'http://matplotlib.sourceforge.net/': None,
                        }
 
-if not on_rtd:
-    autosummary_generate = glob.glob("api/api.rst")
+#if not on_rtd:
+#    autosummary_generate = glob.glob("api/api.rst")


https://bitbucket.org/yt_analysis/yt-doc/commits/fde431521333/
Changeset:   fde431521333
User:        MatthewTurk
Date:        2013-10-28 20:27:32
Summary:     Merged in ngoldbaum/yt-doc (pull request #109)

New notebook sphinx directive and adding bootcamps to the docs build
Affected #:  16 files

diff -r fbf2479e6f0c50b48b111cced08a09e6ea97af35 -r fde431521333d1d137d7487a28a91298c6100954 extensions/notebook_sphinxext.py
--- /dev/null
+++ b/extensions/notebook_sphinxext.py
@@ -0,0 +1,154 @@
+import os, shutil, string, glob
+from sphinx.util.compat import Directive
+from docutils import nodes
+from docutils.parsers.rst import directives
+from IPython.nbconvert import html, python
+from runipy.notebook_runner import NotebookRunner
+from jinja2 import FileSystemLoader
+
+class NotebookDirective(Directive):
+    """Insert an evaluated notebook into a document
+
+    This uses runipy and nbconvert to transform a path to an unevaluated notebook
+    into html suitable for embedding in a Sphinx document.
+    """
+    required_arguments = 1
+    optional_arguments = 0
+
+    def run(self):
+        # check if raw html is supported
+        if not self.state.document.settings.raw_enabled:
+            raise self.warning('"%s" directive disabled.' % self.name)
+
+        # get path to notebook
+        source_dir = os.path.dirname(
+            os.path.abspath(self.state.document.current_source))
+        nb_basename = os.path.basename(self.arguments[0])
+        rst_file = self.state_machine.document.attributes['source']
+        rst_dir = os.path.abspath(os.path.dirname(rst_file))
+        nb_abs_path = os.path.join(rst_dir, nb_basename)
+
+        # Move files around.
+        rel_dir = os.path.relpath(rst_dir, setup.confdir)
+        dest_dir = os.path.join(setup.app.builder.outdir, rel_dir)
+        dest_path = os.path.join(dest_dir, nb_basename)
+
+        if not os.path.exists(dest_dir):
+            os.makedirs(dest_dir)
+
+        # Copy unevaluated script
+        try:
+            shutil.copyfile(nb_abs_path, dest_path)
+        except IOError:
+            raise RuntimeError("Unable to copy notebook to build destination.")
+
+        dest_path_eval = string.replace(dest_path, '.ipynb', '_evaluated.ipynb')
+        dest_path_script = string.replace(dest_path, '.ipynb', '.py')
+
+        # Create python script version
+        unevaluated_text = nb_to_html(nb_abs_path)
+        script_text = nb_to_python(nb_abs_path)
+        f = open(dest_path_script, 'w')
+        f.write(script_text.encode('utf8'))
+        f.close()
+
+        # Create evaluated version and save it to the dest path.
+        # Always use --pylab so figures appear inline
+        # perhaps this is questionable?
+        nb_runner = NotebookRunner(nb_in=nb_abs_path, pylab=True)
+        nb_runner.run_notebook()
+        nb_runner.save_notebook(dest_path_eval)
+        evaluated_text = nb_to_html(dest_path_eval)
+
+        # Create link to notebook and script files
+        link_rst = "(" + \
+                   formatted_link(dest_path) + "; " + \
+                   formatted_link(dest_path_eval) + "; " + \
+                   formatted_link(dest_path_script) + \
+                   ")"
+
+        self.state_machine.insert_input([link_rst], rst_file)
+
+        # create notebook node
+        attributes = {'format': 'html', 'source': 'nb_path'}
+        nb_node = nodes.raw('', evaluated_text, **attributes)
+        (nb_node.source, nb_node.line) = \
+            self.state_machine.get_source_and_line(self.lineno)
+
+        # add dependency
+        self.state.document.settings.record_dependencies.add(nb_abs_path)
+
+        # clean up png files left behind by notebooks.
+        png_files = glob.glob("*.png")
+        for file in png_files:
+            os.remove(file)
+
+        return [nb_node]
+
+class notebook_node(nodes.raw):
+    pass
+
+def nb_to_python(nb_path):
+    """convert notebook to python script"""
+    exporter = python.PythonExporter()
+    output, resources = exporter.from_filename(nb_path)
+    return output
+
+def nb_to_html(nb_path):
+    """convert notebook to html"""
+    exporter = html.HTMLExporter(template_file='full')
+    output, resources = exporter.from_filename(nb_path)
+    header = output.split('<head>', 1)[1].split('</head>',1)[0]
+    body = output.split('<body>', 1)[1].split('</body>',1)[0]
+
+    # http://imgur.com/eR9bMRH
+    header = header.replace('<style', '<style scoped="scoped"')
+    header = header.replace('body{background-color:#ffffff;}\n', '')
+    header = header.replace('body{background-color:white;position:absolute;'
+                            'left:0px;right:0px;top:0px;bottom:0px;'
+                            'overflow:visible;}\n', '')
+    header = header.replace('body{margin:0;'
+                            'font-family:"Helvetica Neue",Helvetica,Arial,'
+                            'sans-serif;font-size:13px;line-height:20px;'
+                            'color:#000000;background-color:#ffffff;}', '')
+    header = header.replace('\na{color:#0088cc;text-decoration:none;}', '')
+    header = header.replace(
+        'a:focus{color:#005580;text-decoration:underline;}', '')
+    header = header.replace(
+        '\nh1,h2,h3,h4,h5,h6{margin:10px 0;font-family:inherit;font-weight:bold;'
+        'line-height:20px;color:inherit;text-rendering:optimizelegibility;}'
+        'h1 small,h2 small,h3 small,h4 small,h5 small,'
+        'h6 small{font-weight:normal;line-height:1;color:#999999;}'
+        '\nh1,h2,h3{line-height:40px;}\nh1{font-size:35.75px;}'
+        '\nh2{font-size:29.25px;}\nh3{font-size:22.75px;}'
+        '\nh4{font-size:16.25px;}\nh5{font-size:13px;}'
+        '\nh6{font-size:11.049999999999999px;}\nh1 small{font-size:22.75px;}'
+        '\nh2 small{font-size:16.25px;}\nh3 small{font-size:13px;}'
+        '\nh4 small{font-size:13px;}', '')
+    header = header.replace('background-color:#ffffff;', '', 1)
+
+    # concatenate raw html lines
+    lines = ['<div class="ipynotebook">']
+    lines.append(header)
+    lines.append(body)
+    lines.append('</div>')
+    return '\n'.join(lines)
+
+def formatted_link(path):
+    return "`%s <%s>`__" % (os.path.basename(path), path)
+
+def visit_notebook_node(self, node):
+    self.visit_raw(node)
+
+def depart_notebook_node(self, node):
+    self.depart_raw(node)
+
+def setup(app):
+    setup.app = app
+    setup.config = app.config
+    setup.confdir = app.confdir
+
+    app.add_node(notebook_node,
+                 html=(visit_notebook_node, depart_notebook_node))
+
+    app.add_directive('notebook', NotebookDirective)

diff -r fbf2479e6f0c50b48b111cced08a09e6ea97af35 -r fde431521333d1d137d7487a28a91298c6100954 source/bootcamp/Data_Inspection.ipynb
--- /dev/null
+++ b/source/bootcamp/Data_Inspection.ipynb
@@ -0,0 +1,396 @@
+{
+ "metadata": {
+  "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Starting Out and Loading Data\n",
+      "\n",
+      "We're going to get started by loading up yt.  This next command brings all of the libraries into memory and sets up our environment.  Note that in most scripts, you will want to import from ``yt.mods`` rather than ``yt.imods``.  But using ``yt.imods`` gets you some nice stuff for the IPython notebook, which we'll use below."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.imods import *"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now that we've loaded yt, we can load up some data.  Let's load the `IsolatedGalaxy` dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Fields and Facts\n",
+      "\n",
+      "When you call the `load` function, yt tries to do very little -- this is designed to be a fast operation, just setting up some information about the simulation.  Now, the first time you access the \"hierarchy\" (shorthand is `.h`) it will read and load the mesh and then determine where data is placed in the physical domain and on disk.  Once it knows that, yt can tell you some statistics about the simulation:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf.h.print_stats()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt can also tell you the fields it found on disk:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf.h.field_list"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "And, all of the fields it thinks it knows how to generate:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf.h.derived_field_list"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt can also transparently generate fields.  However, we encourage you to examine exactly what yt is doing when it generates those fields.  To see, you can ask for the source of a given field."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print pf.field_info[\"VorticityX\"].get_source()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt stores information about the domain of the simulation:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print pf.domain_width"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt can also convert this into various units:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print pf.domain_width * pf[\"kpc\"]\n",
+      "print pf.domain_width * pf[\"au\"]\n",
+      "print pf.domain_width * pf[\"miles\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Mesh Structure\n",
+      "\n",
+      "If you're using a simulation type that has grids (for instance, here we're using an Enzo simulation) you can examine the structure of the mesh.  For the most part, you probably won't have to use this unless you're debugging a simulation or examining in detail what is going on."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print pf.h.grid_left_edge"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "But, you may have to access information about individual grid objects!  Each grid object mediates accessing data from the disk and has a number of attributes that tell you about it.  The hierarchy (`pf.h` here) has an attribute `grids` which is all of the grid objects."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print pf.h.grids[0]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "g = pf.h.grids[0]\n",
+      "print g"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Grids have dimensions, extents, level, and even a list of Child grids."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "g.ActiveDimensions"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "g.LeftEdge, g.RightEdge"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "g.Level"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "g.Children"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Advanced Grid Inspection\n",
+      "\n",
+      "If we want to examine grids only at a given level, we can!  Not only that, but we can load data and take a look at various fields.\n",
+      "\n",
+      "*This section can be skipped!*"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "gs = pf.h.select_grids(pf.h.max_level)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "g2 = gs[0]\n",
+      "print g2\n",
+      "print g2.Parent\n",
+      "print g2.get_global_startindex()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print g2[\"Density\"][:,:,0]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print (g2.Parent.child_mask == 0).sum() * 8\n",
+      "print g2.ActiveDimensions.prod()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "for f in pf.h.field_list:\n",
+      "    fv = g[f]\n",
+      "    if fv.size == 0: continue\n",
+      "    print f, fv.min(), fv.max()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "for f in sorted(pf.h.field_list):\n",
+      "    fv = g[f]\n",
+      "    if fv.size == 0: continue\n",
+      "    print f, fv.min(), fv.max()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Examining Data in Regions\n",
+      "\n",
+      "yt provides data object selectors.  In subsequent notebooks we'll examine these in more detail, but we can select a sphere of data and perform a number of operations on it.  yt makes it easy to operate on fluid fields in an object in *bulk*, but you can also examine individual field values.\n",
+      "\n",
+      "This creates a sphere selector positioned at the most dense point in the simulation that has a radius of 10 kpc."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "sp = pf.h.sphere(\"max\", (10, 'kpc'))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print sp"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can calculate a bunch of bulk quantities.  Here's that list, but there's a list in the docs, too!"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print sp.quantities.keys()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Let's look at the total mass.  This is how you call a given quantity.  yt calls these \"Derived Quantities\".  We'll talk about a few in a later notebook."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print sp.quantities[\"TotalMass\"]()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r fbf2479e6f0c50b48b111cced08a09e6ea97af35 -r fde431521333d1d137d7487a28a91298c6100954 source/bootcamp/Data_Objects_and_Time_Series.ipynb
--- /dev/null
+++ b/source/bootcamp/Data_Objects_and_Time_Series.ipynb
@@ -0,0 +1,361 @@
+{
+ "metadata": {
+  "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Data Objects and Time Series Data\n",
+      "\n",
+      "Just like before, we will load up yt."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.imods import *"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Time Series Data\n",
+      "\n",
+      "Unlike before, instead of loading a single dataset, this time we'll load a bunch which we'll examine in sequence.  This command creates a `TimeSeriesData` object, which can be iterated over (including in parallel, which is outside the scope of this bootcamp) and analyzed.  There are some other helpful operations it can provide, but we'll stick to the basics here.\n",
+      "\n",
+      "Note that you can specify either a list of filenames, or a glob (i.e., asterisk) pattern in this."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ts = TimeSeriesData.from_filenames(\"enzo_tiny_cosmology/*/*.hierarchy\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "### Example 1: Simple Time Series\n",
+      "\n",
+      "As a simple example of how we can use this functionality, let's find the min and max of the density as a function of time in this simulation.  To do this we use the construction `for pf in ts` where `pf` means \"Parameter File\" and `ts` is the \"Time Series\" we just loaded up.  For each parameter file, we'll create an object (`dd`) that covers the entire domain.  (`all_data` is a shorthand function for this.)  We'll then call the Derived Quantity `Extrema`, and append the min and max to our extrema outputs."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "rho_ex = []\n",
+      "times = []\n",
+      "for pf in ts:\n",
+      "    dd = pf.h.all_data()\n",
+      "    rho_ex.append(dd.quantities[\"Extrema\"](\"Density\")[0])\n",
+      "    times.append(pf.current_time * pf[\"years\"])\n",
+      "rho_ex = np.array(rho_ex)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now we plot the minimum and the maximum:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pylab.semilogy(times, rho_ex[:,0], '-xk')\n",
+      "pylab.semilogy(times, rho_ex[:,1], '-xr')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "### Example 2: Advanced Time Series\n",
+      "\n",
+      "Let's do something a bit different.  Let's calculate the total mass inside halos and outside halos.\n",
+      "\n",
+      "This actually touches a lot of different pieces of machinery in yt.  For every parameter file, we will run the halo finder HOP.  Then, we calculate the total mass in the domain.  Then, for each halo, we calculate the sum of the baryon mass in that halo.  We'll keep running tallies of these two things."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "mass = []\n",
+      "zs = []\n",
+      "for pf in ts:\n",
+      "    halos = HaloFinder(pf)\n",
+      "    dd = pf.h.all_data()\n",
+      "    total_mass = dd.quantities[\"TotalQuantity\"](\"CellMassMsun\")[0]\n",
+      "    total_in_baryons = 0.0\n",
+      "    for halo in halos:\n",
+      "        sp = halo.get_sphere()\n",
+      "        total_in_baryons += sp.quantities[\"TotalQuantity\"](\"CellMassMsun\")[0]\n",
+      "    mass.append(total_in_baryons/total_mass)\n",
+      "    zs.append(pf.current_redshift)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now let's plot them!"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pylab.loglog(zs, mass, '-xb')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Data Objects\n",
+      "\n",
+      "Time series data have many applications, but most of them rely on examining the underlying data in some way.  Below, we'll see how to use and manipulate data objects.\n",
+      "\n",
+      "### Ray Queries\n",
+      "\n",
+      "yt provides the ability to examine rays, or lines, through the domain.  Note that these are not periodic, unlike most other data objects.  We create a ray object and can then examine quantities of it.  Rays have the special fields `t` and `dts`, which correspond to the time the ray enters a given cell and the distance it travels through that cell.\n",
+      "\n",
+      "To create a ray, we specify the start and end points."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ray = pf.h.ray([0.1, 0.2, 0.3], [0.9, 0.8, 0.7])\n",
+      "pylab.semilogy(ray[\"t\"], ray[\"Density\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print ray[\"dts\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print ray[\"t\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print ray[\"x\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "### Slice Queries\n",
+      "\n",
+      "While slices are often used for visualization, they can be useful for other operations as well.  yt regards slices as multi-resolution objects.  They are an array of cells that are not all the same size; it only returns the cells at the highest resolution that it intersects.  (This is true for all yt data objects.)  Slices and projections have the special fields `px`, `py`, `pdx` and `pdy`, which correspond to the coordinates and half-widths in the pixel plane."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "v, c = pf.h.find_max(\"Density\")\n",
+      "sl = pf.h.slice(0, c[0])\n",
+      "print sl[\"x\"], sl[\"z\"], sl[\"pdx\"]\n",
+      "print sl[\"Density\"].shape"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If we want to do something interesting with a Slice, we can turn it into a `FixedResolutionBuffer`.  This object can be queried and will return a 2D array of values."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "frb = sl.to_frb((50.0, 'kpc'), 1024)\n",
+      "print frb[\"Density\"].shape"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt provides a few functions for writing arrays to disk, particularly in image form.  Here we'll write out the log of Density, and then use IPython to display it back here.  Note that for the most part, you will probably want to use a `PlotWindow` for this, but in the case that it is useful you can directly manipulate the data."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "write_image(np.log10(frb[\"Density\"]), \"temp.png\")\n",
+      "from IPython.core.display import Image\n",
+      "Image(filename = \"temp.png\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "### Off-Axis Slices\n",
+      "\n",
+      "yt provides not only slices, but off-axis slices that are sometimes called \"cutting planes.\"  These are specified by (in order) a normal vector and a center.  Here we've set the normal vector to `[0.2, 0.3, 0.5]` and the center to be the point of maximum density.\n",
+      "\n",
+      "We can then turn these directly into plot windows using `to_pw`.  Note that the `to_pw` and `to_frb` methods are available on slices, off-axis slices, and projections, and can be used on any of them."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cp = pf.h.cutting([0.2, 0.3, 0.5], \"max\")\n",
+      "pw = cp.to_pw(fields = [\"Density\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Once we have our plot window from our cutting plane, we can show it here."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pw.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can, as noted above, do the same with our slice:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pws = sl.to_pw(fields=[\"Density\"])\n",
+      "pws.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "### Covering Grids\n",
+      "\n",
+      "If we want to access a 3D array of data that spans multiple resolutions in our simulation, we can use a covering grid.  This will return a 3D array of data, drawing from up to the resolution level specified when creating the data.  For example, if you create a covering grid that spans two child grids of a single parent grid, it will fill those zones covered by a zone of a child grid with the data from that child grid.  Where it is covered only by the parent grid, the cells from the parent grid will be duplicated (appropriately) to fill the covering grid.\n",
+      "\n",
+      "There are two different types of covering grids: unsmoothed and smoothed.  Smoothed grids will be filled through a cascading interpolation process; they will be filled at level 0, interpolated to level 1, filled at level 1, interpolated to level 2, filled at level 2, etc.  This will help to reduce edge effects.  Unsmoothed covering grids will not be interpolated, but rather values will be duplicated multiple times.\n",
+      "\n",
+      "Here we create an unsmoothed covering grid at level 2, with the left edge at `[0.0, 0.0, 0.0]` and with dimensions equal to those that would cover the entire domain at level 2.  We can then ask for the Density field, which will be a 3D array."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cg = pf.h.covering_grid(2, [0.0, 0.0, 0.0], pf.domain_dimensions * 2**2)\n",
+      "print cg[\"Density\"].shape"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "In this example, we do exactly the same thing: except we ask for a *smoothed* covering grid, which will reduce edge effects."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "scg = pf.h.smoothed_covering_grid(2, [0.0, 0.0, 0.0], pf.domain_dimensions * 2**2)\n",
+      "print scg[\"Density\"].shape"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r fbf2479e6f0c50b48b111cced08a09e6ea97af35 -r fde431521333d1d137d7487a28a91298c6100954 source/bootcamp/Derived_Fields_and_Profiles.ipynb
--- /dev/null
+++ b/source/bootcamp/Derived_Fields_and_Profiles.ipynb
@@ -0,0 +1,316 @@
+{
+ "metadata": {
+  "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Derived Fields and Profiles\n",
+      "\n",
+      "One of the most powerful features in yt is the ability to create derived fields that act and look exactly like fields that exist on disk.  This means that they will be generated on demand and can be used anywhere a field that exists on disk would be used.  Additionally, you can create them by just writing python functions."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.imods import *"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [],
+     "prompt_number": 1
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Derived Fields\n",
+      "\n",
+      "This is an example of the simplest possible way to create a derived field.  All derived fields are defined by a function and some metadata; that metadata can include units, LaTeX-friendly names, conversion factors, and so on.  Fields can be defined in the way in the next cell.  What this does is create a function which accepts two arguments and then provide the units for that field.  In this case, our field is `Dinosaurs` and our units are `Trex/s`.  The function itself can access any fields that are in the simulation, and it does so by requesting data from the object called `data`."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "@derived_field(units = \"Trex/s\")\n",
+      "def Dinosaurs(field, data):\n",
+      "    return data[\"Density\"]**(2.0/3.0) * data[\"VelocityMagnitude\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [],
+     "prompt_number": 2
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "One important thing to note is that derived fields must be defined *before* any datasets are loaded.  Let's load up our data and take a look at some quantities."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "dd = pf.h.all_data()\n",
+      "print dd.quantities.keys()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "['MinLocation', 'StarAngularMomentumVector', 'WeightedVariance', 'TotalMass', 'AngularMomentumVector', 'TotalQuantity', 'IsBound', 'WeightedAverageQuantity', 'CenterOfMass', 'BulkVelocity', 'ParticleSpinParameter', 'Action', 'Extrema', 'MaxLocation', 'BaryonSpinParameter']\n"
+       ]
+      }
+     ],
+     "prompt_number": 4
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "One interesting question is, what are the minimum and maximum values of dinosaur production rates in our isolated galaxy?  We can do that by examining the `Extrema` quantity -- the exact same way that we would for Density, Temperature, and so on."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dd.quantities[\"Extrema\"](\"Dinosaurs\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": [
+      {
+       "output_type": "stream",
+       "stream": "stdout",
+       "text": [
+        "[(2.2146366774504352e-20, 9.1573883828992124e-09)]\n"
+       ]
+      }
+     ],
+     "prompt_number": 5
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can do the same for the average quantities as well."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dd.quantities[\"WeightedAverageQuantity\"](\"Dinosaurs\", weight=\"Temperature\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## A Few Other Quantities\n",
+      "\n",
+      "We can ask other quantities of our data, as well.  For instance, this sequence of operations will find the most dense point, center a sphere on it, calculate the bulk velocity of that sphere, calculate the baryonic angular momentum vector, and then the density extrema.  All of this is done in a memory conservative way: if you have an absolutely enormous dataset, yt will split that dataset into pieces, apply intermediate reductions and then a final reduction to calculate your quantity."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "sp = pf.h.sphere(\"max\", (10.0, 'kpc'))\n",
+      "bv = sp.quantities[\"BulkVelocity\"]()\n",
+      "L = sp.quantities[\"AngularMomentumVector\"]()\n",
+      "(rho_min, rho_max), = sp.quantities[\"Extrema\"](\"Density\")\n",
+      "print bv, L, rho_min, rho_max"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Profiles\n",
+      "\n",
+      "yt provides the ability to bin in 1, 2 and 3 dimensions.  This means discretizing in one or more dimensions of phase space (density, temperature, etc) and then calculating either the total value of a field in each bin or the average value of a field in each bin.\n",
+      "\n",
+      "We do this using the objects `BinnedProfile1D`, `BinnedProfile2D`, and `BinnedProfile3D`.  The first two are the most common since they are the easiest to visualize.\n",
+      "\n",
+      "This first set of commands manually creates a `BinnedProfile1D` from the sphere we created earlier, binned in 32 bins according to density between `rho_min` and `rho_max`, and then takes the Density-weighted average of the fields `Temperature` and (previously-defined) `Dinosaurs`.  We then plot it in a loglog plot."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prof = BinnedProfile1D(sp, 32, \"Density\", rho_min, rho_max)\n",
+      "prof.add_fields([\"Temperature\", \"Dinosaurs\"], weight=\"Density\")\n",
+      "pylab.loglog(prof[\"Density\"], prof[\"Temperature\"], \"-x\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now we plot the `Dinosaurs` field."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pylab.loglog(prof[\"Density\"], prof[\"Dinosaurs\"], '-x')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If we want to see the total mass in every bin, we add the `CellMassMsun` field with no weight.  Specifying `weight=None` will simply take the total value in every bin and add that up."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prof.add_fields([\"CellMassMsun\"], weight=None)\n",
+      "pylab.loglog(prof[\"Density\"], prof[\"CellMassMsun\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also specify accumulation, which sums all the bins, from left to right.  Note that for 2D and 3D profiles, this needs to be a tuple of length 2 or 3."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prof.add_fields([\"CellMassMsun\"], weight=None, accumulation=True)\n",
+      "pylab.loglog(prof[\"Density\"], prof[\"CellMassMsun\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Advanced Derived Fields\n",
+      "\n",
+      "*This section can be skipped!*\n",
+      "\n",
+      "You can also define fields that require extra zones.  This is useful, for instance, if you want to take the average, or apply a stencil.  yt provides fields like `DivV` that do this internally.  This example is a very busy example of how to do it.  You need to specify the validator `ValidateSpatial` with the number of extra zones *on each side* of the grid that you need, and then inside your function you need to return a field *with those zones stripped off*.  So by necessity, the arrays returned by `data[something]` will have larger spatial extent than what should be returned by the function itself.  If you specify that you need 0 extra zones, this will also work and will simply supply a `grid` object for the field."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "@derived_field(name = \"AveragedTemperature\",\n",
+      "               validators = [ValidateSpatial(1)],\n",
+      "               units = r\"K\")\n",
+      "def _AveragedTemperature(field, data):\n",
+      "    nx, ny, nz = data[\"Temperature\"].shape\n",
+      "    new_field = na.zeros((nx-2,ny-2,nz-2), dtype='float64')\n",
+      "    weight_field = na.zeros((nx-2,ny-2,nz-2), dtype='float64')\n",
+      "    i_i, j_i, k_i = na.mgrid[0:3,0:3,0:3]\n",
+      "    for i,j,k in zip(i_i.ravel(),j_i.ravel(),k_i.ravel()):\n",
+      "        sl = [slice(i,nx-(2-i)),slice(j,ny-(2-j)),slice(k,nz-(2-k))]\n",
+      "        new_field += data[\"Temperature\"][sl] * data[\"CellMass\"][sl]\n",
+      "        weight_field += data[\"CellMass\"][sl]\n",
+      "    # Now some fancy footwork\n",
+      "    new_field2 = na.zeros((nx,ny,nz))\n",
+      "    new_field2[1:-1,1:-1,1:-1] = new_field/weight_field\n",
+      "    return new_field2"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now, once again, we can access `AveragedTemperature` just like any other field.  Note that because it requires ghost zones, this will be a much slower process!"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "dd = pf.h.all_data()\n",
+      "(tmin, tmax), (atmin, atmax) = dd.quantities[\"Extrema\"]([\"Temperature\", \"AveragedTemperature\"])\n",
+      "print tmin, tmax, atmin, atmax\n",
+      "print tmin / atmin, tmax / atmax"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Field Parameters\n",
+      "\n",
+      "Field parameters are a method of passing information to derived fields.  For instance, you might pass in information about a vector you want to use as a basis for a coordinate transformation.  yt often uses things like `bulk_velocity` to identify velocities that should be subtracted off.  Here we show how that works:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "sp_small = pf.h.sphere(\"max\", (1.0, 'kpc'))\n",
+      "bv = sp_small.quantities[\"BulkVelocity\"]()\n",
+      "\n",
+      "sp = pf.h.sphere(\"max\", (0.1, 'mpc'))\n",
+      "rv1 = sp.quantities[\"Extrema\"](\"RadialVelocity\")\n",
+      "\n",
+      "sp.clear_data()\n",
+      "sp.set_field_parameter(\"bulk_velocity\", bv)\n",
+      "rv2 = sp.quantities[\"Extrema\"](\"RadialVelocity\")\n",
+      "\n",
+      "print bv\n",
+      "print rv1\n",
+      "print rv2"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r fbf2479e6f0c50b48b111cced08a09e6ea97af35 -r fde431521333d1d137d7487a28a91298c6100954 source/bootcamp/Introduction.ipynb
--- /dev/null
+++ b/source/bootcamp/Introduction.ipynb
@@ -0,0 +1,66 @@
+{
+ "metadata": {
+  "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Welcome to the yt bootcamp!\n",
+      "\n",
+      "In this brief tutorial, we'll go over how to load up data, analyze things, inspect your data, and make some visualizations.\n",
+      "\n",
+      "But, before we begin, there are a few places to go if you run into trouble.\n",
+      "\n",
+      "**The yt homepage is at http://yt-project.org/**\n",
+      "\n",
+      "## Sources of Help\n",
+      "\n",
+      "There are three places to check for help:\n",
+      "\n",
+      " * The documentation: http://yt-project.org/doc/\n",
+      " * The IRC Channel (`#yt` on `chat.freenode.net`, also at http://yt-project.org/irc.html)\n",
+      " * The `yt-users` mailing list, at http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org\n",
+      "\n",
+      "## Sources of Information\n",
+      "\n",
+      "The first place to go for information about any kind of development is BitBucket at https://bitbucket.org/yt_analysis/yt/ , which contains a bug tracker, the source code, and links to other useful places.\n",
+      "\n",
+      "You can find recipes in the documentation ( http://yt-project.org/doc/ ) under the \"Cookbook\" section.\n",
+      "\n",
+      "There is a portal with access to data and IPython notebooks at http://hub.yt-project.org/ .\n",
+      "\n",
+      "## How to Update yt\n",
+      "\n",
+      "If you ever run into a situation where you need to update your yt installation, simply type this on the command line:\n",
+      "\n",
+      "`yt update`\n",
+      "\n",
+      "This will automatically update it for you.\n",
+      "\n",
+      "## Acquiring the datasets for this tutorial\n",
+      "\n",
+      "To access the datasets that are used in these bootcamp tutorials, you can download them manually at http://yt-project.org/data/.\n",
+      "\n",
+      "## What's Next?\n",
+      "\n",
+      "The Notebooks are meant to be explored in this order:\n",
+      "\n",
+      "1. Introduction\n",
+      "2. Data Inspection\n",
+      "3. Simple Visualization\n",
+      "4. Data Objects and Time Series\n",
+      "5. Derived Fields and Profiles\n",
+      "6. Volume Rendering"
+     ]
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r fbf2479e6f0c50b48b111cced08a09e6ea97af35 -r fde431521333d1d137d7487a28a91298c6100954 source/bootcamp/Simple_Visualization.ipynb
--- /dev/null
+++ b/source/bootcamp/Simple_Visualization.ipynb
@@ -0,0 +1,274 @@
+{
+ "metadata": {
+  "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Simple Visualizations of Data\n",
+      "\n",
+      "Just like in our first notebook, we have to load yt and then some data."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.imods import *"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "For this notebook, we'll load up a cosmology dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "print \"Redshift =\", pf.current_redshift"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "In the terms that yt uses, a projection is a line integral through the domain.  This can either be unweighted (in which case a column density is returned) or weighted, in which case an average value is returned.  Projections are, like all other data objects in yt, full-fledged data objects that churn through data and present that to you.  However, we also provide a simple method of creating Projections and plotting them in a single step.  This is called a Plot Window, here specifically known as a `ProjectionPlot`.  One thing to note is that in yt, we project all the way through the entire domain at a single time.  This means that the first call to projecting can be somewhat time consuming, but panning, zooming and plotting are all quite fast.\n",
+      "\n",
+      "yt is designed to make it easy to make nice plots and straightforward to modify those plots directly.  The cookbook in the documentation includes detailed examples of this."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "p = ProjectionPlot(pf, \"y\", \"Density\")\n",
+      "p.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `show` command simply sends the plot to the IPython notebook.  You can also call `p.save()` which will save the plot to the file system.  This function accepts an argument, which will be prepended to the filename and can be used to name it based on the width or to supply a location.\n",
+      "\n",
+      "Now we'll zoom and pan a bit."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "p.zoom(2.0)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "p.pan_rel((0.1, 0.0))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "p.zoom(10.0)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "p.pan_rel((-0.25, -0.5))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "p.zoom(0.1)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If we specify multiple fields, each time we call `show` we get multiple plots back.  Same for `save`!"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "p = ProjectionPlot(pf, \"z\", [\"Density\", \"Temperature\"], weight_field=\"Density\")\n",
+      "p.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can adjust the colormap on a field-by-field basis."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "p.set_cmap(\"Temperature\", \"hot\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "And, we can re-center the plot on different locations.  One possible use of this would be to make a single `ProjectionPlot` which you move around to look at different regions in your simulation, saving at each one."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "v, c = pf.h.find_max(\"Density\")\n",
+      "p.set_center((c[0], c[1]))\n",
+      "p.zoom(10)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Okay, let's load up a bigger simulation (from `Enzo_64` this time) and make a slice plot."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf = load(\"Enzo_64/DD0043/data0043\")\n",
+      "s = SlicePlot(pf, \"z\", [\"Density\", \"VelocityMagnitude\"], center=\"max\")\n",
+      "s.set_cmap(\"VelocityMagnitude\", \"kamae\")\n",
+      "s.zoom(10.0)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can adjust the logging of various fields:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s.set_log(\"VelocityMagnitude\", True)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt provides many different annotations for your plots.  You can see all of these in the documentation, or if you type `s.annotate_` and press tab, a list will show up here.  We'll annotate with velocity arrows."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s.annotate_velocity()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Contours can also be overlaid:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s = SlicePlot(pf, \"x\", [\"Density\"], center=\"max\")\n",
+      "s.annotate_contour(\"Temperature\")\n",
+      "s.zoom(2.5)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, we can save out to the file system."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s.save()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r fbf2479e6f0c50b48b111cced08a09e6ea97af35 -r fde431521333d1d137d7487a28a91298c6100954 source/bootcamp/Volume_Rendering.ipynb
--- /dev/null
+++ b/source/bootcamp/Volume_Rendering.ipynb
@@ -0,0 +1,95 @@
+{
+ "metadata": {
+  "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# A Brief Demo of Volume Rendering\n",
+      "\n",
+      "This shows a small amount of volume rendering.  Really, just enough to get your feet wet!"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.imods import *\n",
+      "pf = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To create a volume rendering, we need a camera and a transfer function.  We'll use the `ColorTransferFunction`, which accepts (in log space) the minimum and maximum bounds of our transfer function.  This means behavior for data outside these values is undefined.\n",
+      "\n",
+      "We then add on \"layers\" like an onion.  This function can accept a width (here specified) in data units, and also a color map.  Here we add on four layers.\n",
+      "\n",
+      "Finally, we create a camera.  The focal point is `[0.5, 0.5, 0.5]`, the width is 20 kpc (including front-to-back integration) and we specify a transfer function.  Once we've done that, we call `show` to actually cast our rays and display them inline."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "tf = ColorTransferFunction((-28, -24))\n",
+      "tf.add_layers(4, w=0.01)\n",
+      "cam = pf.h.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], 20.0/pf['kpc'], 512, tf)\n",
+      "cam.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If we want to apply a clipping, we can specify the `clip_ratio`.  This will clip the upper bounds to this value times the `std()` of the image array."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cam.show(clip_ratio=4)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "There are several other options we can specify.  Note that here we have turned on the use of ghost zones, shortened the data interval for the transfer function, and widened our gaussian layers."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "tf = ColorTransferFunction((-28, -25))\n",
+      "tf.add_layers(4, w=0.03)\n",
+      "cam = pf.h.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], 20.0/pf['kpc'], 512, tf, no_ghost=False)\n",
+      "cam.show(clip_ratio=4.0)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r fbf2479e6f0c50b48b111cced08a09e6ea97af35 -r fde431521333d1d137d7487a28a91298c6100954 source/bootcamp/data_inspection.rst
--- /dev/null
+++ b/source/bootcamp/data_inspection.rst
@@ -0,0 +1,4 @@
+Data Inspection
+---------------
+
+.. notebook:: Data_Inspection.ipynb

diff -r fbf2479e6f0c50b48b111cced08a09e6ea97af35 -r fde431521333d1d137d7487a28a91298c6100954 source/bootcamp/data_objects_and_time_series.rst
--- /dev/null
+++ b/source/bootcamp/data_objects_and_time_series.rst
@@ -0,0 +1,4 @@
+Data Objects and Time Series
+----------------------------
+
+.. notebook:: Data_Objects_and_Time_Series.ipynb

diff -r fbf2479e6f0c50b48b111cced08a09e6ea97af35 -r fde431521333d1d137d7487a28a91298c6100954 source/bootcamp/derived_fields_and_profiles.rst
--- /dev/null
+++ b/source/bootcamp/derived_fields_and_profiles.rst
@@ -0,0 +1,4 @@
+Derived Fields and Profiles
+---------------------------
+
+.. notebook:: Derived_Fields_and_Profiles.ipynb

diff -r fbf2479e6f0c50b48b111cced08a09e6ea97af35 -r fde431521333d1d137d7487a28a91298c6100954 source/bootcamp/index.rst
--- /dev/null
+++ b/source/bootcamp/index.rst
@@ -0,0 +1,49 @@
+yt Bootcamp
+===========
+
+We have been developing a sequence of materials that can be run in the IPython
+notebook that walk through how to look at data and how to operate on data.
+These are not meant to be detailed walkthroughs, but simply short
+introductions.  Their purpose is to let you explore, interactively, some common
+operations that can be done on data with yt!
+
+To get started with the bootcamp, you need to download the repository and start
+the IPython notebook.  If you have mercurial installed, the easiest way to get
+the repository is to:
+
+.. code-block:: bash
+
+   hg clone https://bitbucket.org/yt_analysis/yt-doc
+
+If you don't, you can download it from `here
+<https://bitbucket.org/yt_analysis/yt-doc/get/tip.tar.bz2>`_
+
+Now you can start the IPython notebook and begin:
+
+.. code-block:: bash
+
+   cd yt-doc/source/bootcamp
+   yt notebook
+
+This command will give you information about the Notebook Server and how to
+access it.  Once you have done so, choose "Introduction" from the list of
+notebooks, which includes an introduction and information about how to download
+the sample data.
+
+.. warning:: The pre-filled out notebooks are *far* less fun than running them
+             yourselves!  Check out the repo and give it a try.
+
+Here are the notebooks, which have been filled in for inspection:
+
+.. toctree::
+   :maxdepth: 1
+
+   introduction
+   data_inspection
+   simple_visualization
+   data_objects_and_time_series
+   derived_fields_and_profiles
+   volume_rendering
+
+Let us know if you would like to contribute other example notebooks, or have
+any suggestions for how these can be improved.

diff -r fbf2479e6f0c50b48b111cced08a09e6ea97af35 -r fde431521333d1d137d7487a28a91298c6100954 source/bootcamp/introduction.rst
--- /dev/null
+++ b/source/bootcamp/introduction.rst
@@ -0,0 +1,4 @@
+Introduction
+------------
+
+.. notebook:: Introduction.ipynb

diff -r fbf2479e6f0c50b48b111cced08a09e6ea97af35 -r fde431521333d1d137d7487a28a91298c6100954 source/bootcamp/simple_visualization.rst
--- /dev/null
+++ b/source/bootcamp/simple_visualization.rst
@@ -0,0 +1,4 @@
+Simple Visualization
+--------------------
+
+.. notebook:: Simple_Visualization.ipynb

diff -r fbf2479e6f0c50b48b111cced08a09e6ea97af35 -r fde431521333d1d137d7487a28a91298c6100954 source/bootcamp/volume_rendering.rst
--- /dev/null
+++ b/source/bootcamp/volume_rendering.rst
@@ -0,0 +1,4 @@
+Volume Rendering
+----------------
+
+.. notebook:: Volume_Rendering.ipynb

diff -r fbf2479e6f0c50b48b111cced08a09e6ea97af35 -r fde431521333d1d137d7487a28a91298c6100954 source/conf.py
--- a/source/conf.py
+++ b/source/conf.py
@@ -30,7 +30,7 @@
 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
               'sphinx.ext.pngmath', 'sphinx.ext.viewcode',
               'sphinx.ext.autosummary', 'numpydocmod', 'youtube',
-              'yt_cookbook', 'yt_colormaps']
+              'yt_cookbook', 'yt_colormaps', 'notebook_sphinxext']
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
@@ -243,5 +243,5 @@
                        'http://matplotlib.sourceforge.net/': None,
                        }
 
-if not on_rtd:
-    autosummary_generate = glob.glob("api/api.rst")
+#if not on_rtd:
+#    autosummary_generate = glob.glob("api/api.rst")

diff -r fbf2479e6f0c50b48b111cced08a09e6ea97af35 -r fde431521333d1d137d7487a28a91298c6100954 source/index.rst
--- a/source/index.rst
+++ b/source/index.rst
@@ -113,7 +113,7 @@
 
    welcome/index
    orientation/index
-   bootcamp
+   bootcamp/index
    workshop
    help/index
    interacting/index

Repository URL: https://bitbucket.org/yt_analysis/yt-doc/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list