[yt-svn] commit/yt: 10 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Mon Mar 17 15:56:21 PDT 2014


10 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/d19208021071/
Changeset:   d19208021071
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-03-11 04:26:26
Summary:     Fixing a long-standing issue with plot window plots that have non-unit aspect ratios.
Affected #:  3 files

diff -r d1277b52f2f480b86902cf2c7fbdbe1a6fe8f289 -r d19208021071fc9890757aff37e05d755d50e51e yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -40,6 +40,8 @@
             self._type_name = "CuttingPlane"
         else:
             self._type_name = viewer._plot_type
+
+
 class PlotMPL(object):
     """A base class for all yt plots made using matplotlib.
 
@@ -89,7 +91,7 @@
     """
     def __init__(self, fsize, axrect, caxrect, zlim, figure, axes, cax):
         """Initialize ImagePlotMPL class object"""
-        PlotMPL.__init__(self, fsize, axrect, figure, axes)
+        super(ImagePlotMPL, self).__init__(fsize, axrect, figure, axes)
         self.zmin, self.zmax = zlim
         if cax is None:
             self.cax = self.figure.add_axes(caxrect)
@@ -98,7 +100,7 @@
             cax.set_position(caxrect)
             self.cax = cax
 
-    def _init_image(self, data, cbnorm, cmap, extent, aspect):
+    def _init_image(self, data, cbnorm, cmap, extent):
         """Store output of imshow in image variable"""
         if (cbnorm == 'log10'):
             norm = matplotlib.colors.LogNorm()
@@ -107,7 +109,7 @@
         extent = [float(e) for e in extent]
         self.image = self.axes.imshow(data.to_ndarray(), origin='lower',
                                       extent=extent, norm=norm, vmin=self.zmin,
-                                      aspect=aspect, vmax=self.zmax, cmap=cmap)
+                                      aspect=1.0, vmax=self.zmax, cmap=cmap)
         self.cb = self.figure.colorbar(self.image, self.cax)
 
     def _repr_png_(self):

diff -r d1277b52f2f480b86902cf2c7fbdbe1a6fe8f289 -r d19208021071fc9890757aff37e05d755d50e51e yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -11,13 +11,11 @@
 from .color_maps import yt_colormaps, is_colormap
 from .plot_modifications import \
     callback_registry
-from .plot_window import \
-    CallbackWrapper
 from .base_plot_types import CallbackWrapper
 
 from yt.funcs import \
     defaultdict, get_image_suffix, \
-    get_ipython_api_version, ensure_list
+    get_ipython_api_version
 from yt.utilities.definitions import axis_names
 from yt.utilities.exceptions import \
     YTNotInsideNotebook
@@ -109,7 +107,7 @@
 
     def __init__(self, data_source, figure_size, fontsize):
         self.data_source = data_source
-        self.figure_size = figure_size
+        self.figure_size = float(figure_size)
         self.plots = PlotDictionary(data_source)
         self._callbacks = []
         self._field_transform = {}
@@ -367,7 +365,7 @@
             The size of the figure on the longest axis (in units of inches),
             including the margins but not the colorbar.
         """
-        self.figure_size = size
+        self.figure_size = float(size)
         return self
 
     def save(self, name=None, mpl_kwargs=None):

diff -r d1277b52f2f480b86902cf2c7fbdbe1a6fe8f289 -r d19208021071fc9890757aff37e05d755d50e51e yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -17,8 +17,6 @@
 import matplotlib
 import cStringIO
 import types
-import os
-import __builtin__
 
 from matplotlib.delaunay.triangulate import Triangulation as triang
 from matplotlib.mathtext import MathTextParser
@@ -27,21 +25,20 @@
 
 from ._mpl_imports import FigureCanvasAgg
 from .image_writer import apply_colormap
+from .base_plot_types import ImagePlotMPL
 from .fixed_resolution import \
     FixedResolutionBuffer, \
     ObliqueFixedResolutionBuffer, \
     OffAxisProjectionFixedResolutionBuffer
 from .plot_modifications import get_smallest_appropriate_unit, \
     callback_registry
-from .base_plot_types import ImagePlotMPL, CallbackWrapper
 from .plot_container import \
     ImagePlotContainer, log_transform, linear_transform, \
     invalidate_data, invalidate_plot, apply_callback
 
 from yt.funcs import \
-    mylog, defaultdict, iterable, ensure_list, \
-    fix_axis, get_image_suffix, assert_valid_width_tuple, \
-    get_ipython_api_version
+    mylog, iterable, ensure_list, \
+    fix_axis, assert_valid_width_tuple
 from yt.units.unit_object import Unit
 from yt.utilities.png_writer import \
     write_png_to_string
@@ -52,13 +49,12 @@
 from yt.utilities.math_utils import \
     ortho_find
 from yt.utilities.exceptions import \
-     YTUnitNotRecognized, YTInvalidWidthError, YTCannotParseUnitDisplayName, \
-     YTNotInsideNotebook
+    YTUnitNotRecognized, YTInvalidWidthError, YTCannotParseUnitDisplayName
 
 from yt.data_objects.time_series import \
     TimeSeriesData
 from yt.units.yt_array import YTArray, YTQuantity
-    
+
 # Some magic for dealing with pyparsing being included or not
 # included in matplotlib (not in gentoo, yes in everything else)
 # Also accounting for the fact that in 1.2.0, pyparsing got renamed.
@@ -243,7 +239,7 @@
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True,
                  periodic=True, origin='center-window', oblique=False,
-                 window_size=10.0, fields=None, fontsize=18, setup=False):
+                 window_size=8.0, fields=None, fontsize=18, setup=False):
         if not hasattr(self, "pf"):
             self.pf = data_source.pf
             ts = self._initialize_dataset(self.pf)
@@ -684,8 +680,8 @@
             else:
                 (unit_x, unit_y) = self._axes_unit_names
 
-            extentx = [(self.xlim[i] - xc).in_units(unit_x) for i in (0,1)]
-            extenty = [(self.ylim[i] - yc).in_units(unit_y) for i in (0,1)]
+            extentx = [float((self.xlim[i] - xc).in_units(unit_x)) for i in (0, 1)]
+            extenty = [float((self.ylim[i] - yc).in_units(unit_y)) for i in (0, 1)]
 
             extent = extentx + extenty
 
@@ -694,20 +690,6 @@
             else:
                 zlim = (None, None)
 
-            plot_aspect = \
-              (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
-
-            # This sets the size of the figure, and defaults to making one of
-            # the dimensions smaller.  This should protect against giant images
-            # in the case of a very large aspect ratio.
-            cbar_frac = 0.0
-            if plot_aspect > 1.0:
-                size = (self.figure_size*(1.+cbar_frac),
-                        self.figure_size/plot_aspect)
-            else:
-                size = (plot_aspect*self.figure_size*(1.+cbar_frac),
-                        self.figure_size)
-
             image = self._frb[f]
 
             if image.max() == image.min():
@@ -729,10 +711,11 @@
                     axes = self.plots[f].axes
                     cax = self.plots[f].cax
 
-            self.plots[f] = WindowPlotMPL(image, self._field_transform[f].name,
-                                          self._colormaps[f], extent, 1.0,
-                                          zlim, size, fp.get_size(), fig, axes,
-                                          cax)
+            self.plots[f] = WindowPlotMPL(
+                image, self._field_transform[f].name,
+                self._colormaps[f], extent, zlim,
+                self.figure_size, fp.get_size(),
+                fig, axes, cax)
 
             axes_unit_labels = ['', '']
             comoving = False
@@ -1534,51 +1517,45 @@
             self._field_transform[field] = linear_transform
 
 class WindowPlotMPL(ImagePlotMPL):
-    def __init__(
-            self, data, cbname, cmap, extent, aspect, zlim, size, fontsize,
-            figure, axes, cax):
+    def __init__(self, data, cbname, cmap, extent, zlim, figure_size, fontsize,
+                 figure, axes, cax):
         self._draw_colorbar = True
         self._draw_axes = True
-        self._cache_layout(size, fontsize)
+        self._fontsize = fontsize
+        self._fontscale = float(fontsize) / 18.0
+        self._figure_size = figure_size
+        self._extent = extent
 
-        # Make room for a colorbar
-        self.input_size = size
-        self.fsize = [size[0] + self._cbar_inches[self._draw_colorbar], size[1]]
+        # set default layout
+        size, axrect, caxrect = self._get_best_layout()
 
-        # Compute layout
-        axrect, caxrect = self._get_best_layout(fontsize)
-        if np.any(np.array(axrect) < 0):
-            msg = 'The axis ratio of the requested plot is very narrow. ' \
-                  'There is a good chance the plot will not look very good, ' \
-                  'consider making the plot manually using ' \
-                  'FixedResolutionBuffer and matplotlib.'
-            mylog.warn(msg)
-            axrect  = (0.07, 0.10, 0.80, 0.80)
-            caxrect = (0.87, 0.10, 0.04, 0.80)
-        ImagePlotMPL.__init__(
-            self, self.fsize, axrect, caxrect, zlim, figure, axes, cax)
-        self._init_image(data, cbname, cmap, extent, aspect)
-        self.image.axes.ticklabel_format(scilimits=(-2,3))
+        super(WindowPlotMPL, self).__init__(
+            size, axrect, caxrect, zlim, figure, axes, cax)
+
+        self._init_image(data, cbname, cmap, extent)
+
+        self.image.axes.ticklabel_format(scilimits=(-2, 3))
         if cbname == 'linear':
             self.cb.formatter.set_scientific(True)
-            self.cb.formatter.set_powerlimits((-2,3))
+            self.cb.formatter.set_powerlimits((-2, 3))
             self.cb.update_ticks()
 
     def _toggle_axes(self, choice):
         self._draw_axes = choice
         self.axes.get_xaxis().set_visible(choice)
         self.axes.get_yaxis().set_visible(choice)
-        axrect, caxrect = self._get_best_layout()
+        size, axrect, caxrect = self._get_best_layout()
         self.axes.set_position(axrect)
         self.cax.set_position(caxrect)
+        self.figure.set_size_inches(*size)
 
     def _toggle_colorbar(self, choice):
         self._draw_colorbar = choice
         self.cax.set_visible(choice)
-        self.fsize = [self.input_size[0] + self._cbar_inches[choice], self.input_size[1]]
-        axrect, caxrect = self._get_best_layout()
+        size, axrect, caxrect = self._get_best_layout()
         self.axes.set_position(axrect)
         self.cax.set_position(caxrect)
+        self.figure.set_size_inches(*size)
 
     def hide_axes(self):
         self._toggle_axes(False)
@@ -1596,66 +1573,59 @@
         self._toggle_colorbar(True)
         return self
 
-    def _cache_layout(self, size, fontsize):
-        self._cbar_inches = {}
-        self._text_buffx = {}
-        self._text_bottomy = {}
-        self._text_topy = {}
+    def _get_best_layout(self):
+        norm_size = self._figure_size/8.
 
-        self._aspect = 1.0*size[0]/size[1]
-        self._fontscale = fontsize / 18.0
+        if self._draw_colorbar:
+            cb_frac = .3*norm_size
+            cb_text_frac = 1.2*norm_size*self._fontscale
+        else:
+            cb_frac = 0.0
+            cb_text_frac = 0.0
 
-        # Leave room for a colorbar, if we are drawing it.
-        self._cbar_inches[True] = self._fontscale*0.7
-        self._cbar_inches[False] = 0
+        if self._draw_axes:
+            x_ax_frac = 0.85*norm_size*self._fontscale
+            y_ax_frac = 0.75*norm_size*self._fontscale
+            top_buff = 0.2*norm_size
+        else:
+            x_ax_frac = 0.0
+            y_ax_frac = 0.0
+            top_buff = 0.0
 
-        # add buffers for text, and a bit of whitespace on top
-        self._text_buffx[True] = self._fontscale * 1.0/(size[0] + self._cbar_inches[True])
-        self._text_bottomy[True] = self._fontscale * 0.7/size[1]
-        self._text_topy[True] = self._fontscale * 0.3/size[1]
+        extent = self._extent
+        aspect = (extent[1] - extent[0])/(extent[3] - extent[2])
 
-        # No buffer for text if we're not drawing axes
-        self._text_buffx[False] = 0
-        self._text_bottomy[False] = 0
-        self._text_topy[False] = 0
+        # Ensure the figsize along the long axis is always equal to _figure_size
+        if aspect >= 1.0:
+            ximsize = self._figure_size
+            yimsize = self._figure_size/aspect
+        if aspect < 1.0:
+            ximsize = self._figure_size*aspect
+            yimsize = self._figure_size
 
-    def _get_best_layout(self, fontsize=18):
-        # calculate how much room the colorbar takes
-        cbar_frac = self._cbar_inches[self._draw_colorbar]/self.fsize[0]
+        xbins = np.array([x_ax_frac, ximsize, cb_frac, cb_text_frac])
+        ybins = np.array([y_ax_frac, yimsize, top_buff])
 
-        # Calculate y fraction, then use to make x fraction.
-        yfrac = 1.0-self._text_bottomy[self._draw_axes]-self._text_topy[self._draw_axes]
-        ysize = yfrac*self.fsize[1]
-        xsize = self._aspect*ysize
-        xfrac = xsize/self.fsize[0]
+        size = [xbins.sum(), ybins.sum()]
 
-        # Now make sure it all fits!
-        xbig = xfrac + self._text_buffx[self._draw_axes] + 2.0*cbar_frac
-        ybig = yfrac + self._text_bottomy[self._draw_axes] + self._text_topy[self._draw_axes]
-
-        if xbig > 1:
-            xsize /= xbig
-            ysize /= xbig
-        if ybig > 1:
-            xsize /= ybig
-            ysize /= ybig
-        xfrac = xsize/self.fsize[0]
-        yfrac = ysize/self.fsize[1]
+        x_frac_widths = xbins/size[0]
+        y_frac_widths = ybins/size[1]
 
         axrect = (
-            self._text_buffx[self._draw_axes],
-            self._text_bottomy[self._draw_axes],
-            xfrac,
-            yfrac
+            x_frac_widths[0],
+            y_frac_widths[0],
+            x_frac_widths[1],
+            y_frac_widths[1],
         )
 
         caxrect = (
-            self._text_buffx[self._draw_axes]+xfrac,
-            self._text_bottomy[self._draw_axes],
-            cbar_frac/4.,
-            yfrac
+            x_frac_widths[0]+x_frac_widths[1],
+            y_frac_widths[0],
+            x_frac_widths[2],
+            y_frac_widths[1],
         )
-        return axrect, caxrect
+
+        return size, axrect, caxrect
 
 def SlicePlot(pf, normal=None, fields=None, axis=None, *args, **kwargs):
     r"""


https://bitbucket.org/yt_analysis/yt/commits/e65977431a23/
Changeset:   e65977431a23
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-03-11 04:49:49
Summary:     Fixing an issue with plots that have unit_x != unit_y
Affected #:  2 files

diff -r d19208021071fc9890757aff37e05d755d50e51e -r e65977431a23bac97af6650241e82cb67f901ec7 yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -100,7 +100,7 @@
             cax.set_position(caxrect)
             self.cax = cax
 
-    def _init_image(self, data, cbnorm, cmap, extent):
+    def _init_image(self, data, cbnorm, cmap, extent, aspect):
         """Store output of imshow in image variable"""
         if (cbnorm == 'log10'):
             norm = matplotlib.colors.LogNorm()
@@ -109,7 +109,7 @@
         extent = [float(e) for e in extent]
         self.image = self.axes.imshow(data.to_ndarray(), origin='lower',
                                       extent=extent, norm=norm, vmin=self.zmin,
-                                      aspect=1.0, vmax=self.zmax, cmap=cmap)
+                                      aspect=aspect, vmax=self.zmax, cmap=cmap)
         self.cb = self.figure.colorbar(self.image, self.cax)
 
     def _repr_png_(self):

diff -r d19208021071fc9890757aff37e05d755d50e51e -r e65977431a23bac97af6650241e82cb67f901ec7 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -680,8 +680,10 @@
             else:
                 (unit_x, unit_y) = self._axes_unit_names
 
-            extentx = [float((self.xlim[i] - xc).in_units(unit_x)) for i in (0, 1)]
-            extenty = [float((self.ylim[i] - yc).in_units(unit_y)) for i in (0, 1)]
+            aspect = np.float64(self.pf.quan(1.0, unit_y)/self.pf.quan(1.0, unit_x))
+
+            extentx = [(self.xlim[i] - xc).in_units(unit_x) for i in (0, 1)]
+            extenty = [(self.ylim[i] - yc).in_units(unit_y) for i in (0, 1)]
 
             extent = extentx + extenty
 
@@ -715,7 +717,7 @@
                 image, self._field_transform[f].name,
                 self._colormaps[f], extent, zlim,
                 self.figure_size, fp.get_size(),
-                fig, axes, cax)
+                aspect, fig, axes, cax)
 
             axes_unit_labels = ['', '']
             comoving = False
@@ -1518,7 +1520,7 @@
 
 class WindowPlotMPL(ImagePlotMPL):
     def __init__(self, data, cbname, cmap, extent, zlim, figure_size, fontsize,
-                 figure, axes, cax):
+                 aspect, figure, axes, cax):
         self._draw_colorbar = True
         self._draw_axes = True
         self._fontsize = fontsize
@@ -1532,7 +1534,7 @@
         super(WindowPlotMPL, self).__init__(
             size, axrect, caxrect, zlim, figure, axes, cax)
 
-        self._init_image(data, cbname, cmap, extent)
+        self._init_image(data, cbname, cmap, extent, aspect)
 
         self.image.axes.ticklabel_format(scilimits=(-2, 3))
         if cbname == 'linear':
@@ -1584,9 +1586,9 @@
             cb_text_frac = 0.0
 
         if self._draw_axes:
-            x_ax_frac = 0.85*norm_size*self._fontscale
+            x_ax_frac = 1.0*norm_size*self._fontscale
             y_ax_frac = 0.75*norm_size*self._fontscale
-            top_buff = 0.2*norm_size
+            top_buff = 0.3*norm_size
         else:
             x_ax_frac = 0.0
             y_ax_frac = 0.0


https://bitbucket.org/yt_analysis/yt/commits/8e863dbbd762/
Changeset:   8e863dbbd762
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-03-14 22:27:44
Summary:     Merging with mainline.
Affected #:  282 files

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/cheatsheet.tex
--- a/doc/cheatsheet.tex
+++ b/doc/cheatsheet.tex
@@ -217,21 +217,21 @@
 in the snapshot. \\
 \texttt{val, loc = pf.h.find\_max("Density")} \textemdash\ Find the \texttt{val}ue of
 the maximum of the field \texttt{Density} and its \texttt{loc}ation. \\
-\texttt{sp = pf.h.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\   Create a spherical data 
+\texttt{sp = pf.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\   Create a spherical data 
 container. {\it cen} may be a coordinate, or ``max'' which 
 centers on the max density point. {\it radius} may be a float in 
 code units or a tuple of ({\it length, unit}).\\
 
-\texttt{re = pf.h.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
+\texttt{re = pf.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
 rectilinear data container. {\it cen} is required but not used.
 {\it left} and {\it right edge} are coordinate values that define the region.
 
-\texttt{di = pf.h.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\ 
+\texttt{di = pf.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\ 
 Create a cylindrical data container centered at {\it cen} along the 
 direction set by {\it normal},with total length
  2$\times${\it height} and with radius {\it radius}. \\
  
- \texttt{bl = pf.h.boolean({\it constructor})} \textemdash\ Create a boolean data
+ \texttt{bl = pf.boolean({\it constructor})} \textemdash\ Create a boolean data
  container. {\it constructor} is a list of pre-defined non-boolean 
  data containers with nested boolean logic using the
  ``AND'', ``NOT'', or ``OR'' operators. E.g. {\it constructor=}

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -60,7 +60,7 @@
  * Avoid Enzo-isms.  This includes but is not limited to:
    * Hard-coding parameter names that are the same as those in Enzo.  The
      following translation table should be of some help.  Note that the
-     parameters are now properties on a StaticOutput subclass: you access them
+     parameters are now properties on a Dataset subclass: you access them
      like pf.refine_by .
      * RefineBy => refine_by
      * TopGridRank => dimensionality

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -17,7 +17,7 @@
 everywhere, "Enzo" fields in Enzo datasets, "Orion" fields in Orion datasets,
 and so on.
 
-Try using the ``pf.h.field_list`` and ``pf.h.derived_field_list`` to view the
+Try using the ``pf.field_list`` and ``pf.derived_field_list`` to view the
 native and derived fields available for your dataset respectively. For example
 to display the native fields in alphabetical order:
 
@@ -25,7 +25,7 @@
 
   from yt.mods import *
   pf = load("Enzo_64/DD0043/data0043")
-  for i in sorted(pf.h.field_list):
+  for i in sorted(pf.field_list):
     print i
 
 .. note:: Universal fields will be overridden by a code-specific field.

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/how_to_develop_yt.txt
--- a/doc/how_to_develop_yt.txt
+++ b/doc/how_to_develop_yt.txt
@@ -79,8 +79,8 @@
       This is where interfaces to codes are created.  Within each subdirectory of
       yt/frontends/ there must exist the following files, even if empty:
 
-      * data_structures.py, where subclasses of AMRGridPatch, StaticOutput and
-        GridGeometryHandler are defined.
+      * data_structures.py, where subclasses of AMRGridPatch, Dataset and
+        GridIndex are defined.
       * io.py, where a subclass of IOHandler is defined.
       * misc.py, where any miscellaneous functions or classes are defined.
       * definitions.py, where any definitions specific to the frontend are

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -503,10 +503,8 @@
     cd $LIB
     if [ ! -z `echo $LIB | grep h5py` ]
     then
-        shift
 	( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
     else
-        shift
         ( ${DEST_DIR}/bin/python2.7 setup.py build   $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
     fi
     ( ${DEST_DIR}/bin/python2.7 setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
--- a/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
+++ b/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:874e85c86cd80a516bb61775b566cd46766c60bdf8f865336bf9dd3505f83821"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -18,7 +19,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.imods import *\n",
+      "%matplotlib inline\n",
+      "from yt.mods import *\n",
       "from yt.analysis_modules.api import ParticleTrajectories\n",
       "from yt.config import ytcfg\n",
       "path = ytcfg.get(\"yt\", \"test_data_dir\")"
@@ -220,7 +222,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "sp = pf.h.sphere(\"max\", (0.5, \"mpc\"))\n",
+      "sp = pf.sphere(\"max\", (0.5, \"mpc\"))\n",
       "indices = sp[\"particle_index\"][sp[\"particle_type\"] == 1]"
      ],
      "language": "python",
@@ -238,7 +240,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "my_fns = glob.glob(path+\"/enzo_tiny_cosmology/DD*/*.hierarchy\")\n",
+      "my_fns = glob.glob(path+\"/enzo_tiny_cosmology/DD*/*.index\")\n",
       "my_fns.sort()\n",
       "trajs = ParticleTrajectories(my_fns, indices)"
      ],

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:e5d3c629592c8aacbabf2e3fab2660703298886b8de6f36eb7cdc1f60b726496"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -88,7 +89,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.imods import *\n",
+      "%matplotlib inline\n",
+      "from yt.mods import *\n",
       "from yt.analysis_modules.api import SZProjection\n",
       "\n",
       "pf = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -22,7 +22,7 @@
 selecting only those clumps that are gravitationally bound.
 
 Once the clump-finder has finished, the user can write out a set of quantities for each clump in the 
-hierarchy.  Additional info items can also be added.  We also provide a recipe
+index.  Additional info items can also be added.  We also provide a recipe
 for finding clumps in :ref:`cookbook-find_clumps`.
 
 Treecode Optimization
@@ -85,7 +85,7 @@
   from yt.mods import *
   
   pf = load("DD0000")
-  sp = pf.h.sphere([0.5, 0.5, 0.5], radius=0.1)
+  sp = pf.sphere([0.5, 0.5, 0.5], radius=0.1)
   
   ratio = sp.quantities["IsBound"](truncate=False, include_thermal_energy=True,
       treecode=True, opening_angle=2.0)
@@ -98,7 +98,7 @@
   from yt.mods import *
   
   pf = load("DD0000")
-  sp = pf.h.sphere([0.5, 0.5, 0.5], radius=0.1)
+  sp = pf.sphere([0.5, 0.5, 0.5], radius=0.1)
   
   ratio = sp.quantities["IsBound"](truncate=False, include_thermal_energy=True,
       treecode=False)
@@ -151,7 +151,7 @@
 region of analysis. Up to about 100,000 cells,
 the treecode is actually slower than the brute-force method. This is due to
 the fact that with fewer cells, smaller geometric distances,
-and a shallow AMR hierarchy, the treecode
+and a shallow AMR index, the treecode
 method has very little chance to be applied. The calculation is overall
 slower due to the overhead of the treecode method & startup costs. This
 explanation is further strengthened by the fact that the accuracy of the

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -107,7 +107,7 @@
 
 .. code-block:: python
 
-  ell = pf.h.ellipsoid(ell_param[0],
+  ell = pf.ellipsoid(ell_param[0],
   ell_param[1],
   ell_param[2],
   ell_param[3],

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -35,7 +35,7 @@
 
 .. code:: python
 
-    from yt.imods import *
+    from yt.mods import *
     from yt.analysis_modules.api import *
     from yt.utilities.cosmology import Cosmology
 
@@ -89,7 +89,7 @@
 
 .. code:: python
 
-    sp = pf.h.sphere("c", (250., "kpc"))
+    sp = pf.sphere("c", (250., "kpc"))
 
 This will serve as our ``data_source`` that we will use later. Next, we
 need to create the ``SpectralModel`` instance that will determine how
@@ -445,7 +445,7 @@
 
 .. code:: python
 
-   sphere = pf.h.sphere(pf.domain_center, 1.0/pf["mpc"])
+   sphere = pf.sphere(pf.domain_center, 1.0/pf["mpc"])
        
    A = 6000.
    exp_time = 2.0e5

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/analysis_modules/radmc3d_export.rst
--- a/doc/source/analyzing/analysis_modules/radmc3d_export.rst
+++ b/doc/source/analyzing/analysis_modules/radmc3d_export.rst
@@ -18,7 +18,7 @@
 
 To compute thermal emission intensities, RADMC-3D needs a file called
 "dust_density.inp" that specifies the density of dust for every cell in the AMR
-hierarchy. To generate this file, first import the RADMC-3D exporter, which 
+index. To generate this file, first import the RADMC-3D exporter, which 
 is not loaded into your environment by default:
 
 .. code-block:: python
@@ -73,7 +73,7 @@
 
 The file format required for line emission is slightly different. The following script will generate 
 two files, one called "numderdens_co.inp", which contains the number density of CO molecules
-for every cell in the hierarchy, and another called "gas-velocity.inp", which is useful if you want 
+for every cell in the index, and another called "gas-velocity.inp", which is useful if you want 
 to include doppler broadening.
 
 .. code-block:: python

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/analysis_modules/running_halofinder.rst
--- a/doc/source/analyzing/analysis_modules/running_halofinder.rst
+++ b/doc/source/analyzing/analysis_modules/running_halofinder.rst
@@ -448,7 +448,7 @@
   pf = load('data0458')
   # Note that the first term below, [0.5]*3, defines the center of
   # the region and is not used. It can be any value.
-  sv = pf.h.region([0.5]*3, [0.21, .21, .72], [.28, .28, .79])
+  sv = pf.region([0.5]*3, [0.21, .21, .72], [.28, .28, .79])
   halos = HaloFinder(pf, subvolume = sv)
   halos.write_out("sv.out")
 
@@ -493,10 +493,10 @@
   from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
 
   #find all of our simulation files
-  files = glob.glob("Enzo_64/DD*/\*hierarchy")
+  files = glob.glob("Enzo_64/DD*/\*index")
   #hopefully the file name order is chronological
   files.sort()
-  ts = TimeSeriesData.from_filenames(files[:])
+  ts = DatasetSeries.from_filenames(files[:])
   rh = RockstarHaloFinder(ts)
   rh.run()
 
@@ -522,7 +522,7 @@
     the width of the smallest grid element in the simulation from the
     last data snapshot (i.e. the one where time has evolved the
     longest) in the time series:
-    ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
+    ``pf_last.index.get_smallest_dx() * pf_last['mpch']``.
   * ``total_particles``, if supplied, this is a pre-calculated
     total number of dark matter
     particles present in the simulation. For example, this is useful
@@ -624,12 +624,12 @@
     
     def main():
         import enzo
-        pf = EnzoStaticOutputInMemory()
+        pf = EnzoDatasetInMemory()
         mine = ytcfg.getint('yt','__topcomm_parallel_rank')
         size = ytcfg.getint('yt','__topcomm_parallel_size')
 
         # Call rockstar.
-        ts = TimeSeriesData([pf])
+        ts = DatasetSeries([pf])
         outbase = "./rockstar_halos_%04d" % pf['NumberOfPythonTopGridCalls']
         rh = RockstarHaloFinder(ts, num_readers = size,
             outbase = outbase)

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/analysis_modules/star_analysis.rst
--- a/doc/source/analyzing/analysis_modules/star_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/star_analysis.rst
@@ -52,7 +52,7 @@
   from yt.mods import *
   from yt.analysis_modules.star_analysis.api import *
   pf = load("data0030")
-  re = pf.h.region([0.5,0.5,0.5], [0.4,0.5,0.6], [0.5,0.6,0.7])
+  re = pf.region([0.5,0.5,0.5], [0.4,0.5,0.6], [0.5,0.6,0.7])
   # This puts the particle data for *all* the particles in the region re
   # into the arrays sm and ct.
   sm = re["ParticleMassMsun"]
@@ -148,7 +148,7 @@
 
 .. code-block:: python
 
-  re = pf.h.region([0.5,0.5,0.5], [0.4,0.5,0.6], [0.5,0.6,0.7])
+  re = pf.region([0.5,0.5,0.5], [0.4,0.5,0.6], [0.5,0.6,0.7])
   spec.calculate_spectrum(data_source=re)
 
 If a subset of stars are desired, call it like this. ``star_mass`` is in units
@@ -157,7 +157,7 @@
 
 .. code-block:: python
 
-  re = pf.h.region([0.5,0.5,0.5], [0.4,0.5,0.6], [0.5,0.6,0.7])
+  re = pf.region([0.5,0.5,0.5], [0.4,0.5,0.6], [0.5,0.6,0.7])
   # This puts the particle data for *all* the particles in the region re
   # into the arrays sm, ct and metal.
   sm = re["ParticleMassMsun"]

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/analysis_modules/sunrise_export.rst
--- a/doc/source/analyzing/analysis_modules/sunrise_export.rst
+++ b/doc/source/analyzing/analysis_modules/sunrise_export.rst
@@ -18,7 +18,7 @@
 	from yt.mods import *
 	import numpy as na
 
-	pf = ARTStaticOutput(file_amr)
+	pf = ARTDataset(file_amr)
 	potential_value,center=pf.h.find_min('Potential_New')
 	root_cells = pf.domain_dimensions[0]
 	le = np.floor(root_cells*center) #left edge
@@ -69,7 +69,7 @@
 
 	for x,a in enumerate(zip(pos,age)): #loop over stars
 	    center = x*pf['kpc']
-	    grid,idx = find_cell(pf.h.grids[0],center)
+	    grid,idx = find_cell(pf.index.grids[0],center)
 	    pk[i] = grid['Pk'][idx]
 
 This code is how Sunrise calculates the pressure, so we can add our own derived field:
@@ -114,7 +114,7 @@
 Sanity Check: Gas & Stars Line Up
 ---------------------------------
 
-If you add your star particles separately from the gas cell hierarchy, then it is worth checking that they still lined up once they've been loaded into Sunrise. This is fairly easy to do with a useful 'auxiliary' run. In Sunrise, set all of your rays to zero, (nrays_nonscatter, nrays_scatter,nrays_intensity,nrays_ir ) except for nrays_aux, and this will produce an mcrx FITS file with a gas map, a metals map, a temperature*gass_mass map and a stellar map for each camera. As long as you keep some cameras at theta,phi = 0,0 or 90,0, etc., then a standard yt projection down the code's xyz axes should look identical:
+If you add your star particles separately from the gas cell index, then it is worth checking that they still line up once they've been loaded into Sunrise. This is fairly easy to do with a useful 'auxiliary' run. In Sunrise, set all of your rays to zero, (nrays_nonscatter, nrays_scatter,nrays_intensity,nrays_ir ) except for nrays_aux, and this will produce an mcrx FITS file with a gas map, a metals map, a temperature*gas_mass map and a stellar map for each camera. As long as you keep some cameras at theta,phi = 0,0 or 90,0, etc., then a standard yt projection down the code's xyz axes should look identical:
 
 .. code-block:: python
 

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/analysis_modules/two_point_functions.rst
--- a/doc/source/analyzing/analysis_modules/two_point_functions.rst
+++ b/doc/source/analyzing/analysis_modules/two_point_functions.rst
@@ -872,7 +872,7 @@
     
     # We work in simulation's units, these are for conversion.
     vol_conv = pf['cm'] ** 3
-    sm = pf.h.get_smallest_dx()**3
+    sm = pf.index.get_smallest_dx()**3
     
     # Our density limit, in gm/cm**3
     dens = 2e-31

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/creating_derived_fields.rst
--- a/doc/source/analyzing/creating_derived_fields.rst
+++ b/doc/source/analyzing/creating_derived_fields.rst
@@ -88,7 +88,7 @@
 
    >>> from yt.mods import *
    >>> pf = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
-   >>> pf.h.field_list
+   >>> pf.field_list
    ['dens', 'temp', 'pres', 'gpot', 'divb', 'velx', 'vely', 'velz', 'magx', 'magy', 'magz', 'magp']
    >>> pf.field_info['dens']._units
    '\\rm{g}/\\rm{cm}^{3}'

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/external_analysis.rst
--- a/doc/source/analyzing/external_analysis.rst
+++ b/doc/source/analyzing/external_analysis.rst
@@ -21,7 +21,7 @@
    pf = load("DD0010/DD0010")
    rt_grids = []
 
-   for grid in pf.h.grids:
+   for grid in pf.index.grids:
        rt_grid = radtrans.RegularBox(
             grid.LeftEdge, grid.RightEdge,
             grid["density"], grid["temperature"], grid["metallicity"])

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -43,7 +43,7 @@
 
 .. code-block:: python
 
-   sl = pf.h.slice(0, 0.5)
+   sl = pf.slice(0, 0.5)
    frb = FixedResolutionBuffer(sl, (0.3, 0.5, 0.6, 0.8), (512, 512))
    my_image = frb["density"]
 
@@ -98,7 +98,7 @@
 
 .. code-block:: python
 
-   source = pf.h.sphere( (0.3, 0.6, 0.4), 1.0/pf['pc'])
+   source = pf.sphere( (0.3, 0.6, 0.4), 1.0/pf['pc'])
    profile = BinnedProfile1D(source, 128, "density", 1e-24, 1e-10)
    profile.add_fields("cell_mass", weight = None)
    profile.add_fields("temperature")
@@ -128,7 +128,7 @@
 
 .. code-block:: python
 
-   source = pf.h.sphere( (0.3, 0.6, 0.4), 1.0/pf['pc'])
+   source = pf.sphere( (0.3, 0.6, 0.4), 1.0/pf['pc'])
    prof2d = BinnedProfile2D(source, 128, "density", 1e-24, 1e-10, True,
                                     128, "temperature", 10, 10000, True)
    prof2d.add_fields("cell_mass", weight = None)
@@ -163,7 +163,7 @@
 
 To calculate the values along a line connecting two points in a simulation, you
 can use the object :class:`~yt.data_objects.data_containers.AMRRayBase`,
-accessible as the ``ray`` property on a hierarchy.  (See :ref:`using-objects`
+accessible as the ``ray`` property on an index.  (See :ref:`using-objects`
 for more information on this.)  To do so, you can supply two points and access
 fields within the returned object.  For instance, this code will generate a ray
 between the points (0.3, 0.5, 0.9) and (0.1, 0.8, 0.5) and examine the density
@@ -171,7 +171,7 @@
 
 .. code-block:: python
 
-   ray = pf.h.ray(  (0.3, 0.5, 0.9), (0.1, 0.8, 0.5) )
+   ray = pf.ray(  (0.3, 0.5, 0.9), (0.1, 0.8, 0.5) )
    print ray["density"]
 
 The points are ordered, but the ray is also traversing cells of varying length,

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/ionization_cube.py
--- a/doc/source/analyzing/ionization_cube.py
+++ b/doc/source/analyzing/ionization_cube.py
@@ -8,14 +8,14 @@
 def IonizedHydrogen(field, data):
     return data["HII_Density"]/(data["HI_Density"]+data["HII_Density"])
 
-ts = TimeSeriesData.from_filenames("SED800/DD*/*.hierarchy", parallel = 8)
+ts = DatasetSeries.from_filenames("SED800/DD*/*.index", parallel = 8)
 
 ionized_z = np.zeros(ts[0].domain_dimensions, dtype="float32")
 
 t1 = time.time()
 for pf in ts.piter():
     z = pf.current_redshift
-    for g in parallel_objects(pf.h.grids, njobs = 16):
+    for g in parallel_objects(pf.index.grids, njobs = 16):
         i1, j1, k1 = g.get_global_startindex() # Index into our domain
         i2, j2, k2 = g.get_global_startindex() + g.ActiveDimensions
         # Look for the newly ionized gas

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -83,7 +83,7 @@
 
 .. code-block:: python
 
-   sp = pf.h.sphere([0.5, 0.5, 0.5], 10.0/pf['kpc'])
+   sp = pf.sphere([0.5, 0.5, 0.5], 10.0/pf['kpc'])
 
 and then look at the temperature of its cells within it via:
 
@@ -107,8 +107,8 @@
 .. code-block:: python
 
    pf = load("my_data")
-   print pf.h.field_list
-   print pf.h.derived_field_list
+   print pf.field_list
+   print pf.derived_field_list
 
 When a field is added, it is added to a container that hangs off of the
 parameter file, as well.  All of the field creation options
@@ -132,11 +132,11 @@
 Available Objects
 -----------------
 
-Objects are instantiated by direct access of a hierarchy.  Each of the objects
-that can be generated by a hierarchy are in fact fully-fledged data objects
+Objects are instantiated by direct access of an index.  Each of the objects
+that can be generated by an index are in fact fully-fledged data objects
 respecting the standard protocol for interaction.
 
-The following objects are available, all of which hang off of the hierarchy
+The following objects are available, all of which hang off of the index
 object.  To access them, you would do something like this (as for a
 :class:`region`):
 
@@ -144,7 +144,7 @@
 
    from yt.mods import *
    pf = load("RedshiftOutput0005")
-   reg = pf.h.region([0.5, 0.5, 0.5], [0.0, 0.0, 0.0], [1.0, 1.0, 1.0])
+   reg = pf.region([0.5, 0.5, 0.5], [0.0, 0.0, 0.0], [1.0, 1.0, 1.0])
 
 .. include:: _obj_docstrings.inc
 
@@ -292,7 +292,7 @@
 
 .. code-block:: python
 
-   sp = pf.h.sphere("max", (1.0, 'pc'))
+   sp = pf.sphere("max", (1.0, 'pc'))
    contour_values, connected_sets = sp.extract_connected_sets(
         "density", 3, 1e-30, 1e-20)
 
@@ -369,20 +369,20 @@
 :mod:`~yt.utilities.ParameterFileStorage` via :class:`~yt.utilities.ParameterFileStorage.ParameterFileStore`.)
 
 To save an object, you can either save it in the ``.yt`` file affiliated with
-the hierarchy or as a standalone file.  For instance, using
-:meth:`~yt.data_objects.hierarchy.save_object` we can save a sphere.
+the index or as a standalone file.  For instance, using
+:meth:`~yt.data_objects.index.save_object` we can save a sphere.
 
 .. code-block:: python
 
    from yt.mods import *
    pf = load("my_data")
-   sp = pf.h.sphere([0.5, 0.5, 0.5], 10.0/pf['kpc'])
+   sp = pf.sphere([0.5, 0.5, 0.5], 10.0/pf['kpc'])
 
    pf.h.save_object(sp, "sphere_to_analyze_later")
 
 
 In a later session, we can load it using
-:meth:`~yt.data_objects.hierarchy.load_object`:
+:meth:`~yt.data_objects.index.load_object`:
 
 .. code-block:: python
 
@@ -399,14 +399,14 @@
    from yt.mods import *
 
    pf = load("my_data")
-   sp = pf.h.sphere([0.5, 0.5, 0.5], 10.0/pf['kpc'])
+   sp = pf.sphere([0.5, 0.5, 0.5], 10.0/pf['kpc'])
 
    sp.save_object("my_sphere", "my_storage_file.cpkl")
 
 This will store the object as ``my_sphere`` in the file
 ``my_storage_file.cpkl``, which will be created or accessed using the standard
 python module :mod:`shelve`.  Note that if a filename is not supplied, it will
-be saved via the hierarchy, as above.
+be saved via the index, as above.
 
 To re-load an object saved this way, you can use the shelve module directly:
 
@@ -430,6 +430,6 @@
           loading and storing objects -- so in theory you could even save a
           list of objects!
 
-This method works for clumps, as well, and the entire clump hierarchy will be
+This method works for clumps, as well, and the entire clump hierarchy will be
 stored and restored upon load.
 

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -166,7 +166,7 @@
 Spatial Decomposition
 +++++++++++++++++++++
 
-During this process, the hierarchy will be decomposed along either all three
+During this process, the index will be decomposed along either all three
 axes or along an image plane, if the process is that of projection.  This type
 of parallelism is overall less efficient than grid-based parallelism, but it
 has been shown to obtain good results overall.
@@ -283,7 +283,7 @@
 -----------------------------
 
 The same :func:`parallel_objects` machinery discussed above is turned on by
-default when using a ``TimeSeriesData`` object (see :ref:`time-series-analysis`)
+default when using a ``DatasetSeries`` object (see :ref:`time-series-analysis`)
 to iterate over simulation outputs.  The syntax for this is very simple.  As an
 example, we can use the following script to find the angular momentum vector in
 a 1 pc sphere centered on the maximum density cell in a large number of
@@ -292,7 +292,7 @@
 .. code-block:: python
 
    from yt.pmods import *
-   ts = TimeSeriesData.from_filenames("DD*/output_*", parallel = True)
+   ts = DatasetSeries.from_filenames("DD*/output_*", parallel = True)
    sphere = ts.sphere("max", (1.0, "pc"))
    L_vecs = sphere.quantities["AngularMomentumVector"]()
 
@@ -302,15 +302,15 @@
 explicitly set ``parallel = True`` as in the above example. 
 
 One could get the same effect by iterating over the individual parameter files
-in the TimeSeriesData object:
+in the DatasetSeries object:
 
 .. code-block:: python
 
    from yt.pmods import *
-   ts = TimeSeriesData.from_filenames("DD*/output_*", parallel = True)
+   ts = DatasetSeries.from_filenames("DD*/output_*", parallel = True)
    my_storage = {}
    for sto,pf in ts.piter(storage=my_storage):
-       sphere = pf.h.sphere("max", (1.0, "pc"))
+       sphere = pf.sphere("max", (1.0, "pc"))
        L_vec = sphere.quantities["AngularMomentumVector"]()
        sto.result_id = pf.parameter_filename
        sto.result = L_vec
@@ -329,7 +329,7 @@
 .. code-block:: python
 
    from yt.pmods import *
-   ts = TimeSeriesData.from_filenames("DD*/output_*", parallel = 4)
+   ts = DatasetSeries.from_filenames("DD*/output_*", parallel = 4)
   sphere = ts.sphere("max", (1.0, "pc"))
    L_vecs = sphere.quantities["AngularMomentumVector"]()
 

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/time_series_analysis.rst
--- a/doc/source/analyzing/time_series_analysis.rst
+++ b/doc/source/analyzing/time_series_analysis.rst
@@ -17,11 +17,11 @@
        process_output(pf)
 
 But this is not really very nice.  This ends up requiring a lot of maintenance.
-The :class:`~yt.data_objects.time_series.TimeSeriesData` object has been
+The :class:`~yt.data_objects.time_series.DatasetSeries` object has been
 designed to remove some of this clunkiness and present an easier, more unified
 approach to analyzing sets of data.  Even better,
-:class:`~yt.data_objects.time_series.TimeSeriesData` works in parallel by
-default (see :ref:`parallel-computation`), so you can use a ``TimeSeriesData``
+:class:`~yt.data_objects.time_series.DatasetSeries` works in parallel by
+default (see :ref:`parallel-computation`), so you can use a ``DatasetSeries``
 object to quickly and easily parallelize your analysis.  Since doing the same
 analysis task on many simulation outputs is 'embarrassingly' parallel, this
 naturally allows for almost arbitrary speedup - limited only by the number of
@@ -33,9 +33,9 @@
 creating your own, and these operators can be applied either to datasets on the
 whole or to subregions of individual datasets.
 
-The simplest mechanism for creating a ``TimeSeriesData`` object is to use the
+The simplest mechanism for creating a ``DatasetSeries`` object is to use the
 class method
-:meth:`~yt.data_objects.time_series.TimeSeriesData.from_filenames`.  This
+:meth:`~yt.data_objects.time_series.DatasetSeries.from_filenames`.  This
 method accepts a list of strings that can be supplied to ``load``.  For
 example:
 
@@ -43,7 +43,7 @@
 
    from yt.mods import *
    filenames = ["DD0030/output_0030", "DD0040/output_0040"]
-   ts = TimeSeriesData.from_filenames(filenames)
+   ts = DatasetSeries.from_filenames(filenames)
 
 This will create a new time series, populated with the output files ``DD0030``
 and ``DD0040``.  This object, here called ``ts``, can now be analyzed in bulk.
@@ -53,29 +53,29 @@
 .. code-block:: python
 
    from yt.mods import *
-   ts = TimeSeriesData.from_filenames("*/*.hierarchy")
+   ts = DatasetSeries.from_filenames("*/*.index")
 
 Analyzing Each Dataset In Sequence
 ----------------------------------
 
-The :class:`~yt.data_objects.time_series.TimeSeriesData` object has two primary
+The :class:`~yt.data_objects.time_series.DatasetSeries` object has two primary
 methods of iteration.  The first is a very simple iteration, where each object
 is returned for iteration:
 
 .. code-block:: python
 
    from yt.mods import *
-   ts = TimeSeriesData.from_filenames("*/*.hierarchy")
+   ts = DatasetSeries.from_filenames("*/*.index")
    for pf in ts:
        print pf.current_time
 
 This can also operate in parallel, using
-:meth:`~yt.data_objects.time_series.TimeSeriesData.piter`.  For more examples,
+:meth:`~yt.data_objects.time_series.DatasetSeries.piter`.  For more examples,
 see:
 
  * :ref:`parallel-time-series-analysis`
  * The cookbook recipe for :ref:`cookbook-time-series-analysis`
- * :class:`~yt.data_objects.time_series.TimeSeriesData`
+ * :class:`~yt.data_objects.time_series.DatasetSeries`
 
 Prepared Time Series Analysis
 -----------------------------
@@ -97,13 +97,13 @@
 .. code-block:: python
 
    from yt.mods import *
-   ts = TimeSeries.from_filenames("*/*.hierarchy")
+   ts = DatasetSeries.from_filenames("*/*.index")
    max_rho = ts.tasks["MaximumValue"]("density")
 
 When we call the task, the time series object executes the task on each
 component parameter file.  The results are then returned to the user.  More
 complex, multi-task evaluations can be conducted by using the
-:meth:`~yt.data_objects.time_series.TimeSeriesData.eval` call, which accepts a
+:meth:`~yt.data_objects.time_series.DatasetSeries.eval` call, which accepts a
 list of analysis tasks.
 
 Analysis Tasks Applied to Objects
@@ -122,7 +122,7 @@
 .. code-block:: python
 
    from yt.mods import *
-   ts = TimeSeries.from_filenames("*/*.hierarchy")
+   ts = DatasetSeries.from_filenames("*/*.index")
    sphere = ts.sphere("max", (1.0, "pc"))
    L_vecs = sphere.quantities["AngularMomentumVector"]()
 
@@ -155,7 +155,7 @@
    print ms
 
 This allows you to create your own analysis tasks that will be then available
-to time series data objects.  Since ``TimeSeriesData`` objects iterate over
+to time series data objects.  Since ``DatasetSeries`` objects iterate over
 filenames in parallel by default, this allows for transparent parallelization. 
 
 .. _analyzing-an-entire-simulation:
@@ -165,7 +165,7 @@
 
 The parameter file used to run a simulation contains all the information 
 necessary to know what datasets should be available.  The ``simulation`` 
-convenience function allows one to create a ``TimeSeriesData`` object of all 
+convenience function allows one to create a ``DatasetSeries`` object of all 
 or a subset of all data created by a single simulation.
 
 .. note:: Currently only implemented for Enzo.  Other simulation types coming 
@@ -179,7 +179,7 @@
   my_sim = simulation('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo',
                       find_outputs=False)
 
-Then, create a ``TimeSeriesData`` object with the :meth:`get_time_series` 
+Then, create a ``DatasetSeries`` object with the :meth:`get_time_series` 
 function.  With no additional keywords, the time series will include every 
 dataset.  If the **find_outputs** keyword is set to True, a search of the 
 simulation directory will be performed looking for potential datasets.  These 
@@ -249,7 +249,7 @@
    the requested times or redshifts.  If None, the nearest output is always 
    taken.  Default: None.
 
- * **parallel** (*bool*/*int*): If True, the generated TimeSeriesData will 
+ * **parallel** (*bool*/*int*): If True, the generated DatasetSeries will 
    divide the work such that a single processor works on each dataset.  If an
    integer is supplied, the work will be divided into that number of jobs.
    Default: True.

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
--- a/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
+++ b/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:8be3d1eb683160bad5efbb176d4bb310b50e8af4f1a4ad356edb2ae5d5a227d6"
+  "signature": "sha256:b22680b89964ce22188c795c6d0e498dc5654851919949ec3c5ac8822002a43d"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -37,8 +37,8 @@
       "from yt.mods import *\n",
       "ds = load('IsolatedGalaxy/galaxy0030/galaxy0030')\n",
       "          \n",
-      "dd = ds.h.all_data()\n",
-      "maxval, maxloc = ds.h.find_max('density')\n",
+      "dd = ds.all_data()\n",
+      "maxval, maxloc = ds.find_max('density')\n",
       "\n",
       "dens = dd['density']"
      ],

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
--- a/doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
+++ b/doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:97e766f98a390fee4e2e87d8ea55ba5855e4f08ed6a1bfe031eaf71fb41c4822"
+  "signature": "sha256:448380e74a746d19dc1eecfe222c0e798a87a4ac285e4f50e2598316086c5ee8"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -96,13 +96,13 @@
      "input": [
       "from yt.mods import *\n",
       "\n",
-      "ts = TimeSeriesData.from_filenames(\"Enzo_64/DD????/data????\")\n",
+      "ts = DatasetSeries.from_filenames(\"Enzo_64/DD????/data????\")\n",
       "\n",
       "storage = {}\n",
       "\n",
-      "for sto, pf in ts.piter(storage=storage):\n",
-      "    sto.result_id = pf.current_time\n",
-      "    sto.result = pf.length_unit\n",
+      "for sto, ds in ts.piter(storage=storage):\n",
+      "    sto.result_id = ds.current_time\n",
+      "    sto.result = ds.length_unit\n",
       "\n",
       "if is_root():\n",
       "    for t in sorted(storage.keys()):\n",

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/analyzing/units/5)_Units_and_plotting.ipynb
--- a/doc/source/analyzing/units/5)_Units_and_plotting.ipynb
+++ b/doc/source/analyzing/units/5)_Units_and_plotting.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:f22e2acb721ada2e47ad0736deb074ac1bb919b82021b387d8d5624a53889025"
+  "signature": "sha256:e8ff0337ab94f14a8a4edb2b40ea7daaa7afe6b6b29c602207406700afc8157b"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -29,9 +29,8 @@
      "collapsed": false,
      "input": [
       "from yt.mods import *\n",
-      "ds = load('HiResIsolatedGalaxy/DD0044/DD0044')\n",
-      "\n",
-      "slc = SlicePlot(ds, 2, 'density', center=[0.53, 0.53, 0.53], width=(15, 'kpc'))\n",
+      "ds = load('IsolatedGalaxy/galaxy0030/galaxy0030')\n",
+      "slc = SlicePlot(ds, 2, 'density', center=[0.5, 0.5, 0.5], width=(15, 'kpc'))\n",
       "slc.set_figure_size(6)"
      ],
      "language": "python",
@@ -102,7 +101,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "dd = ds.h.all_data()\n",
+      "dd = ds.all_data()\n",
       "plot = ProfilePlot(dd, 'density', 'temperature', weight_field='cell_mass')\n",
       "plot.show()"
      ],

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/bootcamp/2)_Data_Inspection.ipynb
--- a/doc/source/bootcamp/2)_Data_Inspection.ipynb
+++ b/doc/source/bootcamp/2)_Data_Inspection.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:15cdc35ddb8b1b938967237e17534149f734f4e7a61ebd37d74b675f8059da20"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -13,14 +14,14 @@
      "source": [
       "# Starting Out and Loading Data\n",
       "\n",
-      "We're going to get started by loading up yt.  This next command brings all of the libraries into memory and sets up our environment.  Note that in most scripts, you will want to import from ``yt.mods`` rather than ``yt.imods``.  But using ``yt.imods`` gets you some nice stuff for the IPython notebook, which we'll use below."
+      "We're going to get started by loading up yt.  This next command brings all of the libraries into memory and sets up our environment."
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.imods import *"
+      "from yt.mods import *"
      ],
      "language": "python",
      "metadata": {},
@@ -37,7 +38,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
+      "ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
      ],
      "language": "python",
      "metadata": {},
@@ -49,14 +50,14 @@
      "source": [
       "## Fields and Facts\n",
       "\n",
-      "When you call the `load` function, yt tries to do very little -- this is designed to be a fast operation, just setting up some information about the simulation.  Now, the first time you access the \"hierarchy\" (shorthand is `.h`) it will read and load the mesh and then determine where data is placed in the physical domain and on disk.  Once it knows that, yt can tell you some statistics about the simulation:"
+      "When you call the `load` function, yt tries to do very little -- this is designed to be a fast operation, just setting up some information about the simulation.  Now, the first time you access the \"index\" it will read and load the mesh and then determine where data is placed in the physical domain and on disk.  Once it knows that, yt can tell you some statistics about the simulation:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf.h.print_stats()"
+      "ds.print_stats()"
      ],
      "language": "python",
      "metadata": {},
@@ -73,7 +74,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf.h.field_list"
+      "ds.field_list"
      ],
      "language": "python",
      "metadata": {},
@@ -90,7 +91,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf.h.derived_field_list"
+      "ds.derived_field_list"
      ],
      "language": "python",
      "metadata": {},
@@ -107,7 +108,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print pf.field_info[\"gas\", \"vorticity_x\"].get_source()"
+      "print ds.field_info[\"gas\", \"vorticity_x\"].get_source()"
      ],
      "language": "python",
      "metadata": {},
@@ -124,7 +125,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print pf.domain_width"
+      "print ds.domain_width"
      ],
      "language": "python",
      "metadata": {},
@@ -141,9 +142,9 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print pf.domain_width.in_units(\"kpc\")\n",
-      "print pf.domain_width.in_units(\"au\")\n",
-      "print pf.domain_width.in_units(\"mile\")"
+      "print ds.domain_width.in_units(\"kpc\")\n",
+      "print ds.domain_width.in_units(\"au\")\n",
+      "print ds.domain_width.in_units(\"mile\")"
      ],
      "language": "python",
      "metadata": {},
@@ -162,7 +163,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print pf.h.grid_left_edge"
+      "print ds.index.grid_left_edge"
      ],
      "language": "python",
      "metadata": {},
@@ -172,14 +173,14 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "But, you may have to access information about individual grid objects!  Each grid object mediates accessing data from the disk and has a number of attributes that tell you about it.  The hierarchy (`pf.h` here) has an attribute `grids` which is all of the grid objects."
+      "But, you may have to access information about individual grid objects!  Each grid object mediates accessing data from the disk and has a number of attributes that tell you about it.  The index (`ds.index` here) has an attribute `grids` which is all of the grid objects."
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print pf.h.grids[0]"
+      "print ds.index.grids[0]"
      ],
      "language": "python",
      "metadata": {},
@@ -189,7 +190,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "g = pf.h.grids[0]\n",
+      "g = ds.index.grids[0]\n",
       "print g"
      ],
      "language": "python",
@@ -258,7 +259,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "gs = pf.h.select_grids(pf.h.max_level)"
+      "gs = ds.index.select_grids(ds.index.max_level)"
      ],
      "language": "python",
      "metadata": {},
@@ -302,7 +303,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "for f in pf.h.field_list:\n",
+      "for f in ds.field_list:\n",
       "    fv = g[f]\n",
       "    if fv.size == 0: continue\n",
       "    print f, fv.min(), fv.max()"
@@ -326,7 +327,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "sp = pf.h.sphere(\"max\", (10, 'kpc'))"
+      "sp = ds.sphere(\"max\", (10, 'kpc'))"
      ],
      "language": "python",
      "metadata": {},

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/bootcamp/3)_Simple_Visualization.ipynb
--- a/doc/source/bootcamp/3)_Simple_Visualization.ipynb
+++ b/doc/source/bootcamp/3)_Simple_Visualization.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:eb5fbf5eb55a9c8997c687f072c8c6030e74bef0048a72b4f74a06893c11b80a"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -20,7 +21,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.imods import *"
+      "from yt.mods import *"
      ],
      "language": "python",
      "metadata": {},
@@ -37,8 +38,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
-      "print \"Redshift =\", pf.current_redshift"
+      "ds = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "print \"Redshift =\", ds.current_redshift"
      ],
      "language": "python",
      "metadata": {},
@@ -57,7 +58,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "p = ProjectionPlot(pf, \"y\", \"density\")\n",
+      "p = ProjectionPlot(ds, \"y\", \"density\")\n",
       "p.show()"
      ],
      "language": "python",
@@ -134,7 +135,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "p = ProjectionPlot(pf, \"z\", [\"density\", \"temperature\"], weight_field=\"density\")\n",
+      "p = ProjectionPlot(ds, \"z\", [\"density\", \"temperature\"], weight_field=\"density\")\n",
       "p.show()"
      ],
      "language": "python",
@@ -169,7 +170,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "v, c = pf.h.find_max(\"density\")\n",
+      "v, c = ds.find_max(\"density\")\n",
       "p.set_center((c[0], c[1]))\n",
       "p.zoom(10)"
      ],
@@ -188,8 +189,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(\"Enzo_64/DD0043/data0043\")\n",
-      "s = SlicePlot(pf, \"z\", [\"density\", \"velocity_magnitude\"], center=\"max\")\n",
+      "ds = load(\"Enzo_64/DD0043/data0043\")\n",
+      "s = SlicePlot(ds, \"z\", [\"density\", \"velocity_magnitude\"], center=\"max\")\n",
       "s.set_cmap(\"velocity_magnitude\", \"kamae\")\n",
       "s.zoom(10.0)"
      ],

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
--- a/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
+++ b/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:41293a66cd6fd5eae6da2d0343549144dc53d72e83286999faab3cf21d801f51"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -13,14 +14,16 @@
      "source": [
       "# Data Objects and Time Series Data\n",
       "\n",
-      "Just like before, we will load up yt."
+      "Just like before, we will load up yt.  Since we'll be using pylab to plot some data in this notebook, we additionally tell matplotlib to place plots inline inside the notebook."
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.imods import *"
+      "%matplotlib inline\n",
+      "from yt.mods import *\n",
+      "from matplotlib import pylab"
      ],
      "language": "python",
      "metadata": {},
@@ -32,7 +35,7 @@
      "source": [
       "## Time Series Data\n",
       "\n",
-      "Unlike before, instead of loading a single dataset, this time we'll load a bunch which we'll examine in sequence.  This command creates a `TimeSeriesData` object, which can be iterated over (including in parallel, which is outside the scope of this bootcamp) and analyzed.  There are some other helpful operations it can provide, but we'll stick to the basics here.\n",
+      "Unlike before, instead of loading a single dataset, this time we'll load a bunch which we'll examine in sequence.  This command creates a `DatasetSeries` object, which can be iterated over (including in parallel, which is outside the scope of this bootcamp) and analyzed.  There are some other helpful operations it can provide, but we'll stick to the basics here.\n",
       "\n",
       "Note that you can specify either a list of filenames, or a glob (i.e., asterisk) pattern in this."
      ]
@@ -41,7 +44,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ts = TimeSeriesData.from_filenames(\"enzo_tiny_cosmology/*/*.hierarchy\")"
+      "ts = DatasetSeries(\"enzo_tiny_cosmology/*/*.hierarchy\")"
      ],
      "language": "python",
      "metadata": {},
@@ -53,7 +56,7 @@
      "source": [
       "### Example 1: Simple Time Series\n",
       "\n",
-      "As a simple example of how we can use this functionality, let's find the min and max of the density as a function of time in this simulation.  To do this we use the construction `for pf in ts` where `pf` means \"Parameter File\" and `ts` is the \"Time Series\" we just loaded up.  For each parameter file, we'll create an object (`dd`) that covers the entire domain.  (`all_data` is a shorthand function for this.)  We'll then call the Derived Quantity `Extrema`, and append the min and max to our extrema outputs."
+      "As a simple example of how we can use this functionality, let's find the min and max of the density as a function of time in this simulation.  To do this we use the construction `for ds in ts` where `ds` means \"Dataset\" and `ts` is the \"Time Series\" we just loaded up.  For each dataset, we'll create an object (`dd`) that covers the entire domain.  (`all_data` is a shorthand function for this.)  We'll then call the `extrema` Derived Quantity, and append the min and max to our extrema outputs."
      ]
     },
     {
@@ -62,10 +65,10 @@
      "input": [
       "rho_ex = []\n",
       "times = []\n",
-      "for pf in ts:\n",
-      "    dd = pf.h.all_data()\n",
+      "for ds in ts:\n",
+      "    dd = ds.all_data()\n",
       "    rho_ex.append(dd.quantities.extrema(\"density\"))\n",
-      "    times.append(pf.current_time.in_units(\"ys\"))\n",
+      "    times.append(ds.current_time.in_units(\"Gyr\"))\n",
       "rho_ex = np.array(rho_ex)"
      ],
      "language": "python",
@@ -107,16 +110,16 @@
      "input": [
       "mass = []\n",
       "zs = []\n",
-      "for pf in ts:\n",
-      "    halos = HaloFinder(pf)\n",
-      "    dd = pf.h.all_data()\n",
+      "for ds in ts:\n",
+      "    halos = HaloFinder(ds)\n",
+      "    dd = ds.all_data()\n",
       "    total_mass = dd.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
       "    total_in_baryons = 0.0\n",
       "    for halo in halos:\n",
       "        sp = halo.get_sphere()\n",
       "        total_in_baryons += sp.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
       "    mass.append(total_in_baryons/total_mass)\n",
-      "    zs.append(pf.current_redshift)"
+      "    zs.append(ds.current_redshift)"
      ],
      "language": "python",
      "metadata": {},
@@ -158,7 +161,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ray = pf.h.ray([0.1, 0.2, 0.3], [0.9, 0.8, 0.7])\n",
+      "ray = ds.ray([0.1, 0.2, 0.3], [0.9, 0.8, 0.7])\n",
       "pylab.semilogy(ray[\"t\"], ray[\"density\"])"
      ],
      "language": "python",
@@ -209,8 +212,8 @@
      "collapsed": false,
      "input": [
-      "pf = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
-      "v, c = pf.h.find_max(\"density\")\n",
-      "sl = pf.h.slice(0, c[0])\n",
+      "v, c = ds.find_max(\"density\")\n",
+      "sl = ds.slice(0, c[0])\n",
       "print sl[\"index\", \"x\"], sl[\"index\", \"z\"], sl[\"pdx\"]\n",
       "print sl[\"gas\", \"density\"].shape"
      ],
@@ -222,7 +225,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "If we want to do something interesting with a Slice, we can turn it into a `FixedResolutionBuffer`.  This object can be queried and will return a 2D array of values."
+      "If we want to do something interesting with a `Slice`, we can turn it into a `FixedResolutionBuffer`.  This object can be queried and will return a 2D array of values."
      ]
     },
     {
@@ -240,7 +243,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "yt provides a few functions for writing arrays to disk, particularly in image form.  Here we'll write out the log of Density, and then use IPython to display it back here.  Note that for the most part, you will probably want to use a `PlotWindow` for this, but in the case that it is useful you can directly manipulate the data."
+      "yt provides a few functions for writing arrays to disk, particularly in image form.  Here we'll write out the log of `density`, and then use IPython to display it back here.  Note that for the most part, you will probably want to use a `PlotWindow` for this, but in the case that it is useful you can directly manipulate the data."
      ]
     },
     {
@@ -270,7 +273,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "cp = pf.h.cutting([0.2, 0.3, 0.5], \"max\")\n",
+      "cp = ds.cutting([0.2, 0.3, 0.5], \"max\")\n",
       "pw = cp.to_pw(fields = [\"density\"])"
      ],
      "language": "python",
@@ -329,7 +332,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "cg = pf.h.covering_grid(2, [0.0, 0.0, 0.0], pf.domain_dimensions * 2**2)\n",
+      "cg = ds.covering_grid(2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2**2)\n",
       "print cg[\"density\"].shape"
      ],
      "language": "python",
@@ -347,7 +350,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "scg = pf.h.smoothed_covering_grid(2, [0.0, 0.0, 0.0], pf.domain_dimensions * 2**2)\n",
+      "scg = ds.smoothed_covering_grid(2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2**2)\n",
       "print scg[\"density\"].shape"
      ],
      "language": "python",

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
--- a/doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
+++ b/doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:a19d451f3b4dcfeed448caa22c2cac35c46958e0646c19c226b1e467b76d0718"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -20,7 +21,9 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.imods import *"
+      "%matplotlib inline\n",
+      "from yt.mods import *\n",
+      "from matplotlib import pylab"
      ],
      "language": "python",
      "metadata": {},
@@ -32,7 +35,7 @@
      "source": [
       "## Derived Fields\n",
       "\n",
-      "This is an example of the simplest possible way to create a derived field.  All derived fields are defined by a function and some metadata; that metadata can include units, LaTeX-friendly names, conversion factors, and so on.  Fields can be defined in the way in the next cell.  What this does is create a function which accepts two arguments and then provide the units for that field.  In this case, our field is `Dinosaurs` and our units are `Trex/s`.  The function itself can access any fields that are in the simulation, and it does so by requesting data from the object called `data`."
+      "This is an example of the simplest possible way to create a derived field.  All derived fields are defined by a function and some metadata; that metadata can include units, LaTeX-friendly names, conversion factors, and so on.  Fields can be defined in the way in the next cell.  What this does is create a function which accepts two arguments and then provide the units for that field.  In this case, our field is `dinosaurs` and our units are `K*cm/s`.  The function itself can access any fields that are in the simulation, and it does so by requesting data from the object called `data`."
      ]
     },
     {
@@ -58,8 +61,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
-      "dd = pf.h.all_data()\n",
+      "ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "dd = ds.all_data()\n",
       "print dd.quantities.keys()"
      ],
      "language": "python",
@@ -70,7 +73,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "One interesting question is, what are the minimum and maximum values of dinosaur production rates in our isolated galaxy?  We can do that by examining the `Extrema` quantity -- the exact same way that we would for Density, Temperature, and so on."
+      "One interesting question is, what are the minimum and maximum values of dinosaur production rates in our isolated galaxy?  We can do that by examining the `extrema` quantity -- the exact same way that we would for density, temperature, and so on."
      ]
     },
     {
@@ -113,7 +116,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "sp = pf.h.sphere(\"max\", (10.0, 'kpc'))\n",
+      "sp = ds.sphere(\"max\", (10.0, 'kpc'))\n",
       "bv = sp.quantities.bulk_velocity()\n",
       "L = sp.quantities.angular_momentum_vector()\n",
       "rho_min, rho_max = sp.quantities.extrema(\"density\")\n",
@@ -133,7 +136,7 @@
       "\n",
       "We do this using the objects `Profile1D`, `Profile2D`, and `Profile3D`.  The first two are the most common since they are the easiest to visualize.\n",
       "\n",
-      "This first set of commands manually creates a `BinnedProfile1D` from the sphere we created earlier, binned in 32 bins according to density between `rho_min` and `rho_max`, and then takes the density-weighted average of the fields `Temperature` and (previously-defined) `Dinosaurs`.  We then plot it in a loglog plot."
+      "This first set of commands manually creates a profile object from the sphere we created earlier, binned in 32 bins according to density between `rho_min` and `rho_max`, and then takes the density-weighted average of the fields `temperature` and (previously-defined) `dinosaurs`.  We then plot it in a loglog plot."
      ]
     },
     {
@@ -152,7 +155,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Now we plot the `Dinosaurs` field."
+      "Now we plot the `dinosaurs` field."
      ]
     },
     {
@@ -197,10 +200,10 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "sp_small = pf.h.sphere(\"max\", (50.0, 'kpc'))\n",
+      "sp_small = ds.sphere(\"max\", (50.0, 'kpc'))\n",
       "bv = sp_small.quantities[\"BulkVelocity\"]()\n",
       "\n",
-      "sp = pf.h.sphere(\"max\", (0.1, 'Mpc'))\n",
+      "sp = ds.sphere(\"max\", (0.1, 'Mpc'))\n",
       "rv1 = sp.quantities[\"Extrema\"](\"radial_velocity\")\n",
       "\n",
       "sp.clear_data()\n",

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/bootcamp/6)_Volume_Rendering.ipynb
--- a/doc/source/bootcamp/6)_Volume_Rendering.ipynb
+++ b/doc/source/bootcamp/6)_Volume_Rendering.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:2929940fc3977b495aa124dee851f7602d61e073ed65407dd95e7cf597684b35"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -20,9 +21,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.imods import *\n",
-      "import yt.units as u\n",
-      "pf = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
+      "from yt.mods import *\n",
+      "ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
      ],
      "language": "python",
      "metadata": {},
@@ -45,7 +45,7 @@
      "input": [
       "tf = ColorTransferFunction((-28, -24))\n",
       "tf.add_layers(4, w=0.01)\n",
-      "cam = pf.h.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20, 'kpc'), 512, tf, fields=[\"density\"])\n",
+      "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20, 'kpc'), 512, tf, fields=[\"density\"])\n",
       "cam.show()"
      ],
      "language": "python",
@@ -56,7 +56,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "If we want to apply a clipping, we can specify the `clip_ratio`.  This will clip the upper bounds to this value times the `std()` of the image array."
+      "If we want to apply a clipping, we can specify the `clip_ratio`.  This will clip the upper bounds to this value times the standard deviation of the values in the image array."
      ]
     },
     {
@@ -82,7 +82,7 @@
      "input": [
       "tf = ColorTransferFunction((-28, -25))\n",
       "tf.add_layers(4, w=0.03)\n",
-      "cam = pf.h.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20.0, 'kpc'), 512, tf, no_ghost=False)\n",
+      "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20.0, 'kpc'), 512, tf, no_ghost=False)\n",
       "cam.show(clip_ratio=4.0)"
      ],
      "language": "python",

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/aligned_cutting_plane.py
--- a/doc/source/cookbook/aligned_cutting_plane.py
+++ b/doc/source/cookbook/aligned_cutting_plane.py
@@ -6,7 +6,7 @@
 # Create a 1 kpc radius sphere, centered on the max density.  Note that this
 # sphere is very small compared to the size of our final plot, and it has a
 # non-axially aligned L vector.
-sp = pf.h.sphere("center", (15.0, "kpc"))
+sp = pf.sphere("center", (15.0, "kpc"))
 
 # Get the angular momentum vector for the sphere.
 L = sp.quantities.angular_momentum_vector()

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/boolean_data_objects.py
--- a/doc/source/cookbook/boolean_data_objects.py
+++ b/doc/source/cookbook/boolean_data_objects.py
@@ -2,22 +2,22 @@
 
 pf = load("Enzo_64/DD0043/data0043") # load data
 # Make a few data ojbects to start.
-re1 = pf.h.region([0.5, 0.5, 0.5], [0.4, 0.4, 0.4], [0.6, 0.6, 0.6])
-re2 = pf.h.region([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [0.6, 0.6, 0.6])
-sp1 = pf.h.sphere([0.5, 0.5, 0.5], 0.05)
-sp2 = pf.h.sphere([0.1, 0.2, 0.3], 0.1)
+re1 = pf.region([0.5, 0.5, 0.5], [0.4, 0.4, 0.4], [0.6, 0.6, 0.6])
+re2 = pf.region([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [0.6, 0.6, 0.6])
+sp1 = pf.sphere([0.5, 0.5, 0.5], 0.05)
+sp2 = pf.sphere([0.1, 0.2, 0.3], 0.1)
 # The "AND" operator. This will make a region identical to re2.
-bool1 = pf.h.boolean([re1, "AND", re2])
+bool1 = pf.boolean([re1, "AND", re2])
 xp = bool1["particle_position_x"]
 # The "OR" operator. This will make a region identical to re1.
-bool2 = pf.h.boolean([re1, "OR", re2])
+bool2 = pf.boolean([re1, "OR", re2])
 # The "NOT" operator. This will make a region like re1, but with the corner
 # that re2 covers cut out.
-bool3 = pf.h.boolean([re1, "NOT", re2])
+bool3 = pf.boolean([re1, "NOT", re2])
 # Disjoint regions can be combined with the "OR" operator.
-bool4 = pf.h.boolean([sp1, "OR", sp2])
+bool4 = pf.boolean([sp1, "OR", sp2])
 # Find oddly-shaped overlapping regions.
-bool5 = pf.h.boolean([re2, "AND", sp1])
+bool5 = pf.boolean([re2, "AND", sp1])
 # Nested logic with parentheses.
 # This is re1 with the oddly-shaped region cut out.
-bool6 = pf.h.boolean([re1, "NOT", "(", re1, "AND", sp1, ")"])
+bool6 = pf.boolean([re1, "NOT", "(", re1, "AND", sp1, ")"])

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/embedded_javascript_animation.ipynb
--- a/doc/source/cookbook/embedded_javascript_animation.ipynb
+++ b/doc/source/cookbook/embedded_javascript_animation.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:578ca4fbc3831e9093489f06939abce9cde845b6cf75d901a3c429abc270f550"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -22,7 +23,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.imods import *\n",
+      "from yt.mods import *\n",
       "from JSAnimation import IPython_display\n",
       "from matplotlib import animation"
      ],

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/embedded_webm_animation.ipynb
--- a/doc/source/cookbook/embedded_webm_animation.ipynb
+++ b/doc/source/cookbook/embedded_webm_animation.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:67844f8c2c184fc51aa62440cc05623ee85f252edde6faaa0d7b6617c3f33dfe"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -20,7 +21,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.imods import *\n",
+      "from yt.mods import *\n",
       "from matplotlib import animation"
      ],
      "language": "python",

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/extract_fixed_resolution_data.py
--- a/doc/source/cookbook/extract_fixed_resolution_data.py
+++ b/doc/source/cookbook/extract_fixed_resolution_data.py
@@ -10,7 +10,7 @@
 
 # Now, we construct an object that describes the data region and structure we
 # want
-cube = pf.h.covering_grid(2, # The level we are willing to extract to; higher
+cube = pf.covering_grid(2, # The level we are willing to extract to; higher
                              # levels than this will not contribute to the data!
                           left_edge=[0.0, 0.0, 0.0], 
                           # And any fields to preload (this is optional!)

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -13,7 +13,7 @@
 # We want to find clumps over the entire dataset, so we'll just grab the whole
 # thing!  This is a convenience parameter that prepares an object that covers
 # the whole domain.  Note, though, that it will load on demand and not before!
-data_source = pf.h.disk([0.5, 0.5, 0.5], [0., 0., 1.], 
+data_source = pf.disk([0.5, 0.5, 0.5], [0., 0., 1.], 
                         8./pf.units['kpc'], 1./pf.units['kpc'])
 
 # Now we set some sane min/max values between which we want to find contours.
@@ -37,9 +37,9 @@
 
 # As it goes, it appends the information about all the sub-clumps to the
 # master-clump.  Among different ways we can examine it, there's a convenience
-# function for outputting the full hierarchy to a file.
-f = open('%s_clump_hierarchy.txt' % pf,'w')
-amods.level_sets.write_clump_hierarchy(master_clump,0,f)
+# function for outputting the full index to a file.
+f = open('%s_clump_index.txt' % pf,'w')
+amods.level_sets.write_clump_index(master_clump,0,f)
 f.close()
 
 # We can also output some handy information, as well.
@@ -47,7 +47,7 @@
 amods.level_sets.write_clumps(master_clump,0,f)
 f.close()
 
-# We can traverse the clump hierarchy to get a list of all of the 'leaf' clumps
+# We can traverse the clump index to get a list of all of the 'leaf' clumps
 leaf_clumps = get_lowest_clumps(master_clump)
 
 # If you'd like to visualize these clumps, a list of clumps can be supplied to

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/free_free_field.py
--- a/doc/source/cookbook/free_free_field.py
+++ b/doc/source/cookbook/free_free_field.py
@@ -69,7 +69,7 @@
 
 pf = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
 
-sphere = pf.h.sphere(pf.domain_center, (100., "kpc"))
+sphere = pf.sphere(pf.domain_center, (100., "kpc"))
 
 # Print out the total luminosity at 1 keV for the sphere
 

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/halo_merger_tree.py
--- a/doc/source/cookbook/halo_merger_tree.py
+++ b/doc/source/cookbook/halo_merger_tree.py
@@ -12,7 +12,7 @@
 from yt.analysis_modules.halo_merger_tree.api import *
 
 # Makes a TimeSeries object from all of whatever files you have
-ts = TimeSeriesData.from_filenames("enzo_tiny_cosmology/DD????/DD????")
+ts = DatasetSeries.from_filenames("enzo_tiny_cosmology/DD????/DD????")
 
 # For each datadump in our timeseries, run the friends of friends
 # halo finder on it (this has only been tested with FOF currently).

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/multi_plot_3x2_FRB.py
--- a/doc/source/cookbook/multi_plot_3x2_FRB.py
+++ b/doc/source/cookbook/multi_plot_3x2_FRB.py
@@ -28,7 +28,7 @@
 # over the columns, which will become axes of slicing.
 plots = []
 for ax in range(3):
-    sli = pf.h.slice(ax, c[ax])
+    sli = pf.slice(ax, c[ax])
     frb = sli.to_frb(width, res)
     den_axis = axes[ax][0]
     temp_axis = axes[ax][1]

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/multi_plot_slice_and_proj.py
--- a/doc/source/cookbook/multi_plot_slice_and_proj.py
+++ b/doc/source/cookbook/multi_plot_slice_and_proj.py
@@ -16,9 +16,9 @@
 #   bw is the base-width in inches, but 4 is about right for most cases.
 fig, axes, colorbars = get_multi_plot(3, 2, colorbar=orient, bw = 4)
 
-slc = pf.h.slice(2, 0.0, fields=["density","temperature","velocity_magnitude"], 
+slc = pf.slice(2, 0.0, fields=["density","temperature","velocity_magnitude"], 
                  center=pf.domain_center)
-proj = pf.h.proj(2, "density", weight_field="density", center=pf.domain_center)
+proj = pf.proj(2, "density", weight_field="density", center=pf.domain_center)
 
 slc_frb = slc.to_frb((1.0, "mpc"), 512)
 proj_frb = proj.to_frb((1.0, "mpc"), 512)

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/profile_with_variance.py
--- a/doc/source/cookbook/profile_with_variance.py
+++ b/doc/source/cookbook/profile_with_variance.py
@@ -6,7 +6,7 @@
 pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
 # Create a sphere of radius 1000 kpc centered on the max density.
-sphere = pf.h.sphere("max", (1000, "kpc"))
+sphere = pf.sphere("max", (1000, "kpc"))
 
 # Calculate and store the bulk velocity for the sphere.
 bulk_velocity = sphere.quantities['BulkVelocity']()

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/rad_velocity.py
--- a/doc/source/cookbook/rad_velocity.py
+++ b/doc/source/cookbook/rad_velocity.py
@@ -5,7 +5,7 @@
 
 # Get the first sphere
 
-sphere0 = pf.h.sphere(pf.domain_center, (500., "kpc"))
+sphere0 = pf.sphere(pf.domain_center, (500., "kpc"))
 
 # Compute the bulk velocity from the cells in this sphere
 
@@ -13,7 +13,7 @@
 
 # Get the second sphere
 
-sphere1 = pf.h.sphere(pf.domain_center, (500., "kpc"))
+sphere1 = pf.sphere(pf.domain_center, (500., "kpc"))
 
 # Set the bulk velocity field parameter 
 sphere1.set_field_parameter("bulk_velocity", bulk_vel)

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/radial_profile_styles.py
--- a/doc/source/cookbook/radial_profile_styles.py
+++ b/doc/source/cookbook/radial_profile_styles.py
@@ -5,7 +5,7 @@
 
 # Get a sphere object
 
-sphere = pf.h.sphere(pf.domain_center, (500., "kpc"))
+sphere = pf.sphere(pf.domain_center, (500., "kpc"))
 
 # Bin up the data from the sphere into a radial profile
 

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/save_profiles.py
--- a/doc/source/cookbook/save_profiles.py
+++ b/doc/source/cookbook/save_profiles.py
@@ -6,7 +6,7 @@
 
 # Get a sphere
 
-sp = pf.h.sphere(pf.domain_center, (500., "kpc"))
+sp = pf.sphere(pf.domain_center, (500., "kpc"))
 
 # Radial profile from the sphere
 

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/simple_off_axis_projection.py
--- a/doc/source/cookbook/simple_off_axis_projection.py
+++ b/doc/source/cookbook/simple_off_axis_projection.py
@@ -6,7 +6,7 @@
 # Create a 1 kpc radius sphere, centered on the max density.  Note that this
 # sphere is very small compared to the size of our final plot, and it has a
 # non-axially aligned L vector.
-sp = pf.h.sphere("center", (15.0, "kpc"))
+sp = pf.sphere("center", (15.0, "kpc"))
 
 # Get the angular momentum vector for the sphere.
 L = sp.quantities["AngularMomentumVector"]()

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/simple_phase.py
--- a/doc/source/cookbook/simple_phase.py
+++ b/doc/source/cookbook/simple_phase.py
@@ -4,7 +4,7 @@
 pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
 # Create a sphere of radius 100 kpc in the center of the domain.
-my_sphere = pf.h.sphere("c", (100.0, "kpc"))
+my_sphere = pf.sphere("c", (100.0, "kpc"))
 
 # Create a PhasePlot object.
 # Setting weight to None will calculate a sum.

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/simple_profile.py
--- a/doc/source/cookbook/simple_profile.py
+++ b/doc/source/cookbook/simple_profile.py
@@ -6,7 +6,7 @@
 # Create a 1D profile within a sphere of radius 100 kpc
 # of the average temperature and average velocity_x 
 # vs. density, weighted by mass.
-sphere = pf.h.sphere("c", (100., "kpc"))
+sphere = pf.sphere("c", (100., "kpc"))
 plot = ProfilePlot(sphere, "density", ["temperature", "velocity_x"],
                    weight_field="cell_mass")
 

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/simple_radial_profile.py
--- a/doc/source/cookbook/simple_radial_profile.py
+++ b/doc/source/cookbook/simple_radial_profile.py
@@ -4,7 +4,7 @@
 pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
 # Create a sphere of radius 100 kpc in the center of the box.
-my_sphere = pf.h.sphere("c", (100.0, "kpc"))
+my_sphere = pf.sphere("c", (100.0, "kpc"))
 
 # Create a profile of the average density vs. radius.
 plot = ProfilePlot(my_sphere, "Radiuskpc", "density",

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/streamlines_isocontour.py
--- a/doc/source/cookbook/streamlines_isocontour.py
+++ b/doc/source/cookbook/streamlines_isocontour.py
@@ -21,8 +21,8 @@
     ax.plot3D(stream[:,0], stream[:,1], stream[:,2], alpha=0.1)
 
 
-sphere = pf.h.sphere("max", (1.0, "mpc"))
-surface = pf.h.surface(sphere, "density", 1e-24)
+sphere = pf.sphere("max", (1.0, "mpc"))
+surface = pf.surface(sphere, "density", 1e-24)
 colors = apply_colormap(np.log10(surface["temperature"]), cmap_name="hot")
 
 p3dc = Poly3DCollection(surface.triangles, linewidth=0.0)

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/sum_mass_in_sphere.py
--- a/doc/source/cookbook/sum_mass_in_sphere.py
+++ b/doc/source/cookbook/sum_mass_in_sphere.py
@@ -4,7 +4,7 @@
 pf = load("Enzo_64/DD0029/data0029")
 
 # Create a 1 Mpc radius sphere, centered on the max density.
-sp = pf.h.sphere("max", (1.0, "mpc"))
+sp = pf.sphere("max", (1.0, "mpc"))
 
 # Use the TotalQuantity derived quantity to sum up the
 # values of the cell_mass and particle_mass fields

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/surface_plot.py
--- a/doc/source/cookbook/surface_plot.py
+++ b/doc/source/cookbook/surface_plot.py
@@ -4,8 +4,8 @@
 from yt.mods import *
 
 pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
-sphere = pf.h.sphere("max", (1.0, "mpc"))
-surface = pf.h.surface(sphere, "density", 1e-25)
+sphere = pf.sphere("max", (1.0, "mpc"))
+surface = pf.surface(sphere, "density", 1e-25)
 colors = apply_colormap(np.log10(surface["temperature"]), cmap_name="hot")
 
 fig = plt.figure()

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/thin_slice_projection.py
--- a/doc/source/cookbook/thin_slice_projection.py
+++ b/doc/source/cookbook/thin_slice_projection.py
@@ -22,7 +22,7 @@
 left_corner[0] = center[0] + 0.5 * depth / pf.units['mpc']
 
 # Create the region
-region = pf.h.region(center, left_corner, right_corner)
+region = pf.region(center, left_corner, right_corner)
 
 # Create a density projection and supply the region we have just created.
 # Only cells within the region will be included in the projection.

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/time_series.py
--- a/doc/source/cookbook/time_series.py
+++ b/doc/source/cookbook/time_series.py
@@ -13,7 +13,7 @@
 
 # Construct the time series object
 
-ts = TimeSeriesData.from_filenames(fns)
+ts = DatasetSeries.from_filenames(fns)
 
 storage = {}
 
@@ -21,7 +21,7 @@
 # Alternately, you could just iterate "for pf in ts:" and directly append to
 # times and entrs.
 for sto, pf in ts.piter(storage=storage):
-    sphere = pf.h.sphere("c", (100., "kpc"))
+    sphere = pf.sphere("c", (100., "kpc"))
     temp = sphere["temperature"]/keV
     dens = sphere["density"]/(m_p*mue)
     mgas = sphere["cell_mass"]

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/cookbook/zoomin_frames.py
--- a/doc/source/cookbook/zoomin_frames.py
+++ b/doc/source/cookbook/zoomin_frames.py
@@ -20,7 +20,7 @@
 # maximum and the number of items to generate.  It returns 10^power of each
 # item it generates.
 for i,v in enumerate(np.logspace(
-            0, np.log10(pf.h.get_smallest_dx()*min_dx), n_frames)):
+            0, np.log10(pf.index.get_smallest_dx()*min_dx), n_frames)):
     # We set our width as necessary for this frame ...
     p.set_width(v, '1')
     # ... and we save!

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/developing/creating_frontend.rst
--- a/doc/source/developing/creating_frontend.rst
+++ b/doc/source/developing/creating_frontend.rst
@@ -39,10 +39,10 @@
 To get started, make a new directory in ``yt/frontends`` with the name of your
 code -- you can start by copying into it the contents of the ``stream``
 directory, which is a pretty empty format. You'll then have to create a subclass
-of ``StaticOutput``. This subclass will need to handle conversion between the
+of ``Dataset``. This subclass will need to handle conversion between the
 different physical units and the code units; for the most part, the examples of
-``OrionStaticOutput`` and ``EnzoStaticOutput`` should be followed, but
-``ChomboStaticOutput``, as a slightly newer addition, can also be used as an
+``OrionDataset`` and ``EnzoDataset`` should be followed, but
+``ChomboDataset``, as a slightly newer addition, can also be used as an
 instructive example -- be sure to add an ``_is_valid`` classmethod that will
 verify if a filename is valid for that output type, as that is how "load" works.
 
@@ -73,7 +73,7 @@
 ^^^^^^^^^
 
 To set up data localization, an ``AMRHierarchy`` subclass must be added in the
-file ``data_structures.py``. The hierarchy object must override the following
+file ``data_structures.py``. The index object must override the following
 methods:
 
  * ``_detect_fields``: ``self.field_list`` must be populated as a list of
@@ -82,7 +82,7 @@
    ``AMRHierarchy`` subclasses.
  * ``_count_grids``: this must set self.num_grids to be the total number of
    grids in the simulation.
- * ``_parse_hierarchy``: this must fill in ``grid_left_edge``,
+ * ``_parse_index``: this must fill in ``grid_left_edge``,
    ``grid_right_edge``, ``grid_particle_count``, ``grid_dimensions`` and
    ``grid_levels`` with the appropriate information. Additionally, ``grids``
    must be an array of grid objects that already know their IDs.
@@ -93,7 +93,7 @@
    already know, this is where you make a guess at it.
  * ``_setup_derived_fields``: ``self.derived_field_list`` needs to be made a
    list of strings that correspond to all derived fields valid for this
-   hierarchy.
+   index.
 
 For the most part, the ``ChomboHierarchy`` should be the first place to look for
 hints on how to do this; ``EnzoHierarchy`` is also instructive.
@@ -110,9 +110,9 @@
     class ChomboGrid(AMRGridPatch):
         _id_offset = 0
         __slots__ = ["_level_id"]
-        def __init__(self, id, hierarchy, level = -1):
-            AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
-                                  hierarchy = hierarchy)
+        def __init__(self, id, index, level = -1):
+            AMRGridPatch.__init__(self, id, filename = index.index_filename,
+                                  index = index)
             self.Parent = []
             self.Children = []
             self.Level = level

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -306,7 +306,7 @@
       This is where interfaces to codes are created.  Within each subdirectory of
       yt/frontends/ there must exist the following files, even if empty:
 
-      * ``data_structures.py``, where subclasses of AMRGridPatch, StaticOutput
+      * ``data_structures.py``, where subclasses of AMRGridPatch, Dataset
         and AMRHierarchy are defined.
       * ``io.py``, where a subclass of IOHandler is defined.
       * ``misc.py``, where any miscellaneous functions or classes are defined.
@@ -417,7 +417,7 @@
 
    + Hard-coding parameter names that are the same as those in Enzo.  The
      following translation table should be of some help.  Note that the
-     parameters are now properties on a StaticOutput subclass: you access them
+     parameters are now properties on a Dataset subclass: you access them
      like ``pf.refine_by`` .
 
      - ``RefineBy `` => `` refine_by``

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:4a1cb9a60d5113fc4ca2172a69d5e5ebc5506d77928e9f39715060444dc8f8ed"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -51,7 +52,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.imods import *\n",
+      "from yt.mods import *\n",
       "from yt.utilities.physical_constants import cm_per_kpc, cm_per_mpc"
      ],
      "language": "python",
@@ -578,7 +579,7 @@
       "* Particles may be difficult to integrate.\n",
       "* Data must already reside in memory before loading it in to `yt`, whether it is generated at runtime or loaded from disk. \n",
       "* Some functions may behave oddly, and parallelism will be disappointing or non-existent in most cases.\n",
-      "* No consistency checks are performed on the hierarchy\n",
+      "* No consistency checks are performed on the index\n",
       "* Consistency between particle positions and grids is not checked; `load_amr_grids` assumes that particle positions associated with one grid are not bounded within another grid at a higher level, so this must be ensured by the user prior to loading the grid data. "
      ]
     }

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/examining/low_level_inspection.rst
--- a/doc/source/examining/low_level_inspection.rst
+++ b/doc/source/examining/low_level_inspection.rst
@@ -23,11 +23,11 @@
 yt organizes grids in a hierarchical fashion; a coarser grid that contains (or
 overlaps with) a finer grid is referred to as its parent.  yt organizes these
 only a single level of refinement at a time.  To access grids, the ``grids``
-attribute on a :class:`~yt.data_objects.hierarchy.AMRHierarchy` object.  (For
+attribute on a :class:`~yt.data_objects.index.AMRHierarchy` object.  (For
 fast operations, a number of additional arrays prefixed with ``grid`` are also
 available, such as ``grid_left_edges`` and so on.)  This returns an instance of
 :class:`~yt.data_objects.grid_patch.AMRGridPatch`, which can be queried for
-either data or hierarchy information.
+either data or index information.
 
 The :class:`~yt.data_objects.grid_patch.AMRGridPatch` object itself provides
 the following attributes:
@@ -42,13 +42,13 @@
   multiply a field by this attribute.
  * ``child_indices``: a mask of booleans, where False indicates no finer data
    is available.  This is essentially the inverse of ``child_mask``.
- * ``child_index_mask``: a mask of indices into the ``pf.h.grids`` array of the
+ * ``child_index_mask``: a mask of indices into the ``pf.index.grids`` array of the
    child grids.
  * ``LeftEdge``: the left edge, in native code coordinates, of this grid
  * ``RightEdge``: the right edge, in native code coordinates, of this grid
  * ``dds``: the width of a cell in this grid
  * ``id``: the id (not necessarily the index) of this grid.  Defined such that
-   subtracting the property ``_id_offset`` gives the index into ``pf.h.grids``.
+   subtracting the property ``_id_offset`` gives the index into ``pf.index.grids``.
  * ``NumberOfParticles``: the number of particles in this grid
  * ``OverlappingSiblings``: a list of sibling grids that this grid overlaps
    with.  Likely only defined for Octree-based codes.
@@ -64,7 +64,7 @@
 
 .. code-block:: python
 
-   g = pf.h.grids[1043]
+   g = pf.index.grids[1043]
    g2 = g.Children[1].Children[0]
    print g2.LeftEdge
 
@@ -84,16 +84,16 @@
 
 .. code-block:: python
 
-   g = pf.h.grids[1043]
+   g = pf.index.grids[1043]
    print g["density"]
    print g["density"].min()
 
-To access the raw data, you have to call the IO handler from the hierarchy
+To access the raw data, you have to call the IO handler from the index
 instead.  This is somewhat more low-level.
 
 .. code-block:: python
 
-   g = pf.h.grids[1043]
+   g = pf.index.grids[1043]
    rho = pf.h.io.pop(g, "density")
 
 This field will be the raw data found in the file.
@@ -107,12 +107,12 @@
 specific point*.  While there are several ways to find out the answer to this
 question, a few helper routines are provided as well.  To identify the
 finest-resolution (i.e., most canonical) data at a given point, use
-:meth:`~yt.data_objects.hierarchy.AMRHierarchy.find_field_value_at_point`.
+:meth:`~yt.data_objects.index.AMRHierarchy.find_field_value_at_point`.
 This accepts a position (in coordinates of the domain) and returns the field
 values for one or multiple fields.
 
 To identify all the grids that intersect a given point, the function 
-:meth:`~yt.data_objects.hierarchy.AMRHierarchy.find_point` will return indices
+:meth:`~yt.data_objects.index.AMRHierarchy.find_point` will return indices
 and objects that correspond to it.  For instance:
 
 .. code-block:: python
@@ -144,7 +144,7 @@
 
    from yt.mods import *
    pf = load('Enzo_64/DD0043/data0043')
-   all_data_level_0 = pf.h.covering_grid(level=0, left_edge=[0,0.0,0.0], 
+   all_data_level_0 = pf.covering_grid(level=0, left_edge=[0,0.0,0.0], 
                                          dims=[64, 64, 64])
 
 Note that we can also get the same result and rely on the dataset to know 
@@ -152,7 +152,7 @@
 
 .. code-block:: python
 
-   all_data_level_0 = pf.h.covering_grid(level=0, left_edge=[0,0.0,0.0], 
+   all_data_level_0 = pf.covering_grid(level=0, left_edge=[0,0.0,0.0], 
                                          dims=pf.domain_dimensions)
 
 We can now access our underlying data at the lowest level by specifying what
@@ -184,7 +184,7 @@
 
 .. code-block:: python
 
-   all_data_level_2 = pf.h.covering_grid(level=2, left_edge=[0,0.0,0.0], 
+   all_data_level_2 = pf.covering_grid(level=2, left_edge=[0,0.0,0.0], 
                                          dims=pf.domain_dimensions * 2**2)
 
 And let's see what's the density in the central location:
@@ -209,7 +209,7 @@
 
 .. code-block:: python
 
-   all_data_level_2_s = pf.h.smoothed_covering_grid(2, [0.0, 0.0, 0.0], 
+   all_data_level_2_s = pf.smoothed_covering_grid(2, [0.0, 0.0, 0.0], 
                                                     pf.domain_dimensions * 2**2)
 
    print all_data_level_2_s['density'].shape

diff -r e65977431a23bac97af6650241e82cb67f901ec7 -r 8e863dbbd762849394970c5e84319167e06c7086 doc/source/examining/supported_frontends_data.rst
--- a/doc/source/examining/supported_frontends_data.rst
+++ b/doc/source/examining/supported_frontends_data.rst
@@ -21,7 +21,7 @@
 
    DD0010/
    DD0010/data0010
-   DD0010/data0010.hierarchy
+   DD0010/data0010.index
    DD0010/data0010.cpu0000
    DD0010/data0010.cpu0001
    DD0010/data0010.cpu0002

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/cba165c9936e/
Changeset:   cba165c9936e
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-03-15 04:37:52
Summary:     Making set_font respect defaults better.  Adding set_font_size.
Affected #:  1 file

diff -r 8e863dbbd762849394970c5e84319167e06c7086 -r cba165c9936ee1ec2a2927640b7b0b672e443dce yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -319,10 +319,30 @@
             font_dict = {}
         if 'color' in font_dict:
             self._font_color = font_dict.pop('color')
+        # Set default values if the user does not explicitly set them.
+        # this prevents reverting to the matplotlib defaults.
+        if 'family' not in font_dict:
+            font_dict['family'] = 'stixgeneral'
+        if 'size' not in font_dict:
+            font_dict['size'] = 18
         self._font_properties = \
             FontProperties(**font_dict)
         return self
 
+    def set_font_size(self, size):
+        """Set the size of the font used in the plot
+
+        This sets the font size by calling the set_font function.  See set_font
+        for more font customization options.
+
+        Parameters
+        ----------
+        size : float
+        The absolute size of the font in points (1 pt = 1/72 inch).
+
+        """
+        return self.set_font({'size': size})
+
     @invalidate_plot
     def set_cmap(self, field, cmap):
         """set the colormap for one of the fields


https://bitbucket.org/yt_analysis/yt/commits/2be4582f33f3/
Changeset:   2be4582f33f3
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-03-15 04:38:50
Summary:     Refactoring, cleaning up.

This also fixes an issue with colorbar and axis toggling not persisting when
plots get modified.
Affected #:  3 files

diff -r cba165c9936ee1ec2a2927640b7b0b672e443dce -r 2be4582f33f34abdadfdd957ff64edd8e5bdad62 yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -119,6 +119,93 @@
         f.seek(0)
         return f.read()
 
+    def _get_best_layout(self):
+        if self._draw_colorbar:
+            cb_size = self._cb_size
+            cb_text_size = self._ax_text_size[1] + 0.45
+        else:
+            cb_size = 0.0
+            cb_text_size = 0.0
+
+        if self._draw_axes:
+            x_axis_size = self._ax_text_size[0]
+            y_axis_size = self._ax_text_size[1]
+        else:
+            x_axis_size = 0.0
+            y_axis_size = 0.0
+
+        if self._draw_axes or self._draw_colorbar:
+            top_buff_size = self._top_buff_size
+        else:
+            top_buff_size = 0.0
+
+        # Ensure the figure size along the long axis is always equal to _figure_size
+        if self._aspect >= 1.0:
+            x_fig_size = self._figure_size
+            y_fig_size = self._figure_size/self._aspect
+        if self._aspect < 1.0:
+            x_fig_size = self._figure_size*self._aspect
+            y_fig_size = self._figure_size
+
+        xbins = np.array([x_axis_size, x_fig_size, cb_size, cb_text_size])
+        ybins = np.array([y_axis_size, y_fig_size, top_buff_size])
+
+        size = [xbins.sum(), ybins.sum()]
+
+        x_frac_widths = xbins/size[0]
+        y_frac_widths = ybins/size[1]
+
+        axrect = (
+            x_frac_widths[0],
+            y_frac_widths[0],
+            x_frac_widths[1],
+            y_frac_widths[1],
+        )
+
+        caxrect = (
+            x_frac_widths[0]+x_frac_widths[1],
+            y_frac_widths[0],
+            x_frac_widths[2],
+            y_frac_widths[1],
+        )
+
+        return size, axrect, caxrect
+
+    def _toggle_axes(self, choice):
+        self._draw_axes = choice
+        self.axes.get_xaxis().set_visible(choice)
+        self.axes.get_yaxis().set_visible(choice)
+        self.axes.set_frame_on(choice)
+        size, axrect, caxrect = self._get_best_layout()
+        self.axes.set_position(axrect)
+        self.cax.set_position(caxrect)
+        self.figure.set_size_inches(*size)
+
+    def _toggle_colorbar(self, choice):
+        self._draw_colorbar = choice
+        self.cax.set_visible(choice)
+        self.cax.set_frame_on(choice)
+        size, axrect, caxrect = self._get_best_layout()
+        self.axes.set_position(axrect)
+        self.cax.set_position(caxrect)
+        self.figure.set_size_inches(*size)
+
+    def hide_axes(self):
+        self._toggle_axes(False)
+        return self
+
+    def show_axes(self):
+        self._toggle_axes(True)
+        return self
+
+    def hide_colorbar(self):
+        self._toggle_colorbar(False)
+        return self
+
+    def show_colorbar(self):
+        self._toggle_colorbar(True)
+        return self
+
 def get_multi_plot(nx, ny, colorbar = 'vertical', bw = 4, dpi=300,
                    cbar_padding = 0.4):
     r"""Construct a multiple axes plot object, with or without a colorbar, into

diff -r cba165c9936ee1ec2a2927640b7b0b672e443dce -r 2be4582f33f34abdadfdd957ff64edd8e5bdad62 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -239,7 +239,7 @@
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True,
                  periodic=True, origin='center-window', oblique=False,
-                 window_size=8.0, fields=None, fontsize=18, setup=False):
+                 window_size=6.0, fields=None, fontsize=18, setup=False):
         if not hasattr(self, "pf"):
             self.pf = data_source.pf
             ts = self._initialize_dataset(self.pf)
@@ -707,7 +707,11 @@
             fig = None
             axes = None
             cax = None
-            if self.plots.has_key(f):
+            draw_colorbar = True
+            draw_axes = True
+            if f in self.plots:
+                draw_colorbar = self.plots[f]._draw_colorbar
+                draw_axes = self.plots[f]._draw_axes
                 if self.plots[f].figure is not None:
                     fig = self.plots[f].figure
                     axes = self.plots[f].axes
@@ -792,6 +796,9 @@
 
             self.run_callbacks(f)
 
+            self.plots[f]._toggle_axes(draw_axes)
+            self.plots[f]._toggle_colorbar(draw_colorbar)
+
             if self._font_color is not None:
                 ax = self.plots[f].axes
                 cbax = self.plots[f].cb.ax
@@ -1518,23 +1525,31 @@
         else:
             self._field_transform[field] = linear_transform
 
+
 class WindowPlotMPL(ImagePlotMPL):
     def __init__(self, data, cbname, cmap, extent, zlim, figure_size, fontsize,
-                 aspect, figure, axes, cax):
+                 unit_aspect, figure, axes, cax):
         self._draw_colorbar = True
         self._draw_axes = True
         self._fontsize = fontsize
-        self._fontscale = float(fontsize) / 18.0
         self._figure_size = figure_size
-        self._extent = extent
 
-        # set default layout
+        # Compute layout
+        fontscale = float(fontsize) / 18.0
+        if fontscale < 1.0:
+            fontscale = np.sqrt(fontscale)
+
+        self._cb_size = 0.0375*figure_size
+        self._ax_text_size = [0.9*fontscale, 0.7*fontscale]
+        self._top_buff_size = 0.30*fontscale
+        self._aspect = ((extent[1] - extent[0])/(extent[3] - extent[2]))
+
         size, axrect, caxrect = self._get_best_layout()
 
         super(WindowPlotMPL, self).__init__(
             size, axrect, caxrect, zlim, figure, axes, cax)
 
-        self._init_image(data, cbname, cmap, extent, aspect)
+        self._init_image(data, cbname, cmap, extent, unit_aspect)
 
         self.image.axes.ticklabel_format(scilimits=(-2, 3))
         if cbname == 'linear':
@@ -1542,92 +1557,6 @@
             self.cb.formatter.set_powerlimits((-2, 3))
             self.cb.update_ticks()
 
-    def _toggle_axes(self, choice):
-        self._draw_axes = choice
-        self.axes.get_xaxis().set_visible(choice)
-        self.axes.get_yaxis().set_visible(choice)
-        size, axrect, caxrect = self._get_best_layout()
-        self.axes.set_position(axrect)
-        self.cax.set_position(caxrect)
-        self.figure.set_size_inches(*size)
-
-    def _toggle_colorbar(self, choice):
-        self._draw_colorbar = choice
-        self.cax.set_visible(choice)
-        size, axrect, caxrect = self._get_best_layout()
-        self.axes.set_position(axrect)
-        self.cax.set_position(caxrect)
-        self.figure.set_size_inches(*size)
-
-    def hide_axes(self):
-        self._toggle_axes(False)
-        return self
-
-    def show_axes(self):
-        self._toggle_axes(True)
-        return self
-
-    def hide_colorbar(self):
-        self._toggle_colorbar(False)
-        return self
-
-    def show_colorbar(self):
-        self._toggle_colorbar(True)
-        return self
-
-    def _get_best_layout(self):
-        norm_size = self._figure_size/8.
-
-        if self._draw_colorbar:
-            cb_frac = .3*norm_size
-            cb_text_frac = 1.2*norm_size*self._fontscale
-        else:
-            cb_frac = 0.0
-            cb_text_frac = 0.0
-
-        if self._draw_axes:
-            x_ax_frac = 1.0*norm_size*self._fontscale
-            y_ax_frac = 0.75*norm_size*self._fontscale
-            top_buff = 0.3*norm_size
-        else:
-            x_ax_frac = 0.0
-            y_ax_frac = 0.0
-            top_buff = 0.0
-
-        extent = self._extent
-        aspect = (extent[1] - extent[0])/(extent[3] - extent[2])
-
-        # Ensure the figsize along the long axis is always equal to _figure_size
-        if aspect >= 1.0:
-            ximsize = self._figure_size
-            yimsize = self._figure_size/aspect
-        if aspect < 1.0:
-            ximsize = self._figure_size*aspect
-            yimsize = self._figure_size
-
-        xbins = np.array([x_ax_frac, ximsize, cb_frac, cb_text_frac])
-        ybins = np.array([y_ax_frac, yimsize, top_buff])
-
-        size = [xbins.sum(), ybins.sum()]
-
-        x_frac_widths = xbins/size[0]
-        y_frac_widths = ybins/size[1]
-
-        axrect = (
-            x_frac_widths[0],
-            y_frac_widths[0],
-            x_frac_widths[1],
-            y_frac_widths[1],
-        )
-
-        caxrect = (
-            x_frac_widths[0]+x_frac_widths[1],
-            y_frac_widths[0],
-            x_frac_widths[2],
-            y_frac_widths[1],
-        )
-
-        return size, axrect, caxrect
 
 def SlicePlot(pf, normal=None, fields=None, axis=None, *args, **kwargs):
     r"""

diff -r cba165c9936ee1ec2a2927640b7b0b672e443dce -r 2be4582f33f34abdadfdd957ff64edd8e5bdad62 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -17,29 +17,20 @@
 import base64
 import types
 
-from collections import defaultdict
 from functools import wraps
-from itertools import izip, repeat
+from itertools import izip
 import matplotlib
 import numpy as np
 import cStringIO
 import __builtin__
 
-from matplotlib.font_manager import FontProperties
 
-from .plot_window import WindowPlotMPL
 from .base_plot_types import ImagePlotMPL
 from .plot_container import \
     ImagePlotContainer, \
     log_transform, linear_transform
-from .image_writer import \
-    write_image, apply_colormap
 from yt.data_objects.profiles import \
-     create_profile
-from yt.utilities.png_writer import write_png_to_string
-from yt.data_objects.profiles import \
-    BinnedProfile1D, \
-    BinnedProfile2D
+    create_profile
 from yt.utilities.logger import ytLogger as mylog
 import _mpl_imports as mpl
 from yt.funcs import \
@@ -612,7 +603,7 @@
                accumulation=accumulation,
                fractional=fractional)
         self.profile = profile
-        ImagePlotContainer.__init__(self, data_source, figure_size, fontsize)
+        super(PhasePlot, self).__init__(data_source, figure_size, fontsize)
         # This is a fallback, in case we forget.
         self._setup_plots()
         self._initfinished = True
@@ -675,47 +666,56 @@
             fig = None
             axes = None
             cax = None
+            draw_colorbar = True
+            draw_axes = True
             if f in self.plots:
+                draw_colorbar = self.plots[f]._draw_colorbar
+                draw_axes = self.plots[f]._draw_axes
                 if self.plots[f].figure is not None:
                     fig = self.plots[f].figure
                     axes = self.plots[f].axes
                     cax = self.plots[f].cax
 
-            size = (self.figure_size, self.figure_size)
             x_scale, y_scale, z_scale = self._get_field_log(f, self.profile)
             x_title, y_title, z_title = self._get_field_title(f, self.profile)
+
             if z_scale == 'log':
                 zmin = data[data > 0.0].min()
+                self._field_transform[f] = log_transform
             else:
                 zmin = data.min()
+                self._field_transform[f] = linear_transform
             zlim = [zmin, data.max()]
-            
+
             fp = self._font_properties
             f = self.profile.data_source._determine_fields(f)[0]
-            self.plots[f] = PhasePlotMPL(self.profile.x, self.profile.y, data, 
+
+            self.plots[f] = PhasePlotMPL(self.profile.x, self.profile.y, data,
                                          x_scale, y_scale, z_scale,
-                                         self._colormaps[f], np.array(zlim), 
-                                         size, fp.get_size(),
+                                         self._colormaps[f], np.array(zlim),
+                                         self.figure_size, fp.get_size(),
                                          fig, axes, cax)
-            self.plots[f].axes.xaxis.set_label_text(x_title)
-            self.plots[f].axes.yaxis.set_label_text(y_title)
-            self.plots[f].cax.yaxis.set_label_text(z_title)
-            if z_scale == "log":
-                self._field_transform[f] = log_transform
-            else:
-                self._field_transform[f] = linear_transform
+
+            self.plots[f]._toggle_axes(draw_axes)
+            self.plots[f]._toggle_colorbar(draw_colorbar)
+
+            self.plots[f].axes.xaxis.set_label_text(x_title, fontproperties=fp)
+            self.plots[f].axes.yaxis.set_label_text(y_title, fontproperties=fp)
+            self.plots[f].cax.yaxis.set_label_text(z_title, fontproperties=fp)
+
             if f in self.plot_title:
                 self.plots[f].axes.set_title(self.plot_title[f])
 
-            if self._font_color is not None:
-                ax = self.plots[f].axes
-                cbax = self.plots[f].cb.ax
-                labels = \
-                  ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels() + \
-                  cbax.yaxis.get_ticklabels() + \
-                  [ax.xaxis.label, ax.yaxis.label, cbax.yaxis.label]
-                for label in labels:
+            ax = self.plots[f].axes
+            cbax = self.plots[f].cb.ax
+            labels = ((ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels() +
+                       cbax.yaxis.get_ticklabels()) +
+                      [ax.xaxis.label, ax.yaxis.label, cbax.yaxis.label])
+            for label in labels:
+                label.set_fontproperties(fp)
+                if self._font_color is not None:
                     label.set_color(self._font_color)
+
         self._plot_valid = True
 
     def save(self, name=None, mpl_kwargs=None):
@@ -835,35 +835,37 @@
     def setup_callbacks(self, *args):
         raise NotImplementedError
 
-class PhasePlotMPL(WindowPlotMPL):
-    def __init__(self, x_data, y_data, data, 
+
+class PhasePlotMPL(ImagePlotMPL):
+    def __init__(self, x_data, y_data, data,
                  x_scale, y_scale, z_scale, cmap,
-                 zlim, size, fontsize, figure, axes, cax):
+                 zlim, figure_size, fontsize, figure, axes, cax):
         self._initfinished = False
         self._draw_colorbar = True
         self._draw_axes = True
-        self._cache_layout(size, fontsize)
-
-        # Make room for a colorbar
-        self.input_size = size
-        self.fsize = [size[0] + self._cbar_inches[self._draw_colorbar], size[1]]
+        self._figure_size = figure_size
 
         # Compute layout
-        axrect, caxrect = self._get_best_layout(fontsize=fontsize)
-        if np.any(np.array(axrect) < 0):
-            mylog.warning('The axis ratio of the requested plot is very narrow.  '
-                          'There is a good chance the plot will not look very good, '
-                          'consider making the plot manually using FixedResolutionBuffer '
-                          'and matplotlib.')
-            axrect  = (0.07, 0.10, 0.80, 0.80)
-            caxrect = (0.87, 0.10, 0.04, 0.80)
-        ImagePlotMPL.__init__(self, self.fsize, axrect, caxrect, zlim,
-                              figure, axes, cax)
+        fontscale = float(fontsize) / 18.0
+        if fontscale < 1.0:
+            fontscale = np.sqrt(fontscale)
+
+        self._cb_size = 0.0375*figure_size
+        self._ax_text_size = [1.1*fontscale, 0.9*fontscale]
+        self._top_buff_size = 0.30*fontscale
+        self._aspect = 1.0
+
+        size, axrect, caxrect = self._get_best_layout()
+
+        super(PhasePlotMPL, self).__init__(size, axrect, caxrect, zlim,
+                                           figure, axes, cax)
+
         self._init_image(x_data, y_data, data, x_scale, y_scale, z_scale,
                          zlim, cmap)
+
         self._initfinished = True
 
-    def _init_image(self, x_data, y_data, image_data, 
+    def _init_image(self, x_data, y_data, image_data,
                     x_scale, y_scale, z_scale, zlim, cmap):
         """Store output of imshow in image variable"""
         if (z_scale == 'log'):


https://bitbucket.org/yt_analysis/yt/commits/2df85a44c2ae/
Changeset:   2df85a44c2ae
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-03-15 04:53:24
Summary:     Don't hide the colorbar axis frame.
Affected #:  1 file

diff -r 2be4582f33f34abdadfdd957ff64edd8e5bdad62 -r 2df85a44c2ae69705a01dea68713a00652dc4335 yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -184,7 +184,6 @@
     def _toggle_colorbar(self, choice):
         self._draw_colorbar = choice
         self.cax.set_visible(choice)
-        self.cax.set_frame_on(choice)
         size, axrect, caxrect = self._get_best_layout()
         self.axes.set_position(axrect)
         self.cax.set_position(caxrect)


https://bitbucket.org/yt_analysis/yt/commits/bd41e2fe9b3a/
Changeset:   bd41e2fe9b3a
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-03-15 04:55:35
Summary:     Make the default PhasePlot figure size 6 inches.
Affected #:  1 file

diff -r 2df85a44c2ae69705a01dea68713a00652dc4335 -r bd41e2fe9b3a0fdf0ce87e9f812b236f205e3105 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -557,7 +557,7 @@
         Default: "black"
     figure_size : int
         Size in inches of the image.
-        Default: 10 (10x10)
+        Default: 6 (6x6)
 
     Examples
     --------
@@ -586,7 +586,7 @@
     def __init__(self, data_source, x_field, y_field, z_fields,
                  weight_field="cell_mass", x_bins=128, y_bins=128,
                  accumulation=False, fractional=False,
-                 profile=None, fontsize=18, font_color="black", figure_size=8.0):
+                 profile=None, fontsize=18, font_color="black", figure_size=6.0):
         self.plot_title = {}
         self.z_log = {}
         self.z_title = {}


https://bitbucket.org/yt_analysis/yt/commits/c0cac851f2f0/
Changeset:   c0cac851f2f0
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-03-17 20:20:05
Summary:     Merging with mainline development.
Affected #:  3 files

diff -r bd41e2fe9b3a0fdf0ce87e9f812b236f205e3105 -r c0cac851f2f02687f1d2b3e0a978f3e426c17c50 doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:4a1cb9a60d5113fc4ca2172a69d5e5ebc5506d77928e9f39715060444dc8f8ed"
+  "signature": "sha256:cd145d8cadbf1a0065d0f9fb4ea107c215fcd53245b3bb7d29303af46f063552"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -52,8 +52,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.mods import *\n",
-      "from yt.utilities.physical_constants import cm_per_kpc, cm_per_mpc"
+      "%matplotlib inline\n",
+      "from yt.mods import *"
      ],
      "language": "python",
      "metadata": {},
@@ -80,16 +80,16 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "To load this data into `yt`, we need to assign it a field name, in this case \"Density\", and place it into a dictionary. Then, we call `load_uniform_grid`:"
+      "To load this data into `yt`, we need to associate it with a field. The `data` dictionary consists of one or more fields, each consisting of a tuple of a NumPy array and a unit string. Then, we can call `load_uniform_grid`:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "data = dict(Density = arr)\n",
+      "data = dict(density = (arr, \"g/cm**3\"))\n",
       "bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])\n",
-      "pf = load_uniform_grid(data, arr.shape, cm_per_mpc, bbox=bbox, nprocs=64)"
+      "ds = load_uniform_grid(data, arr.shape, length_unit=\"Mpc\", bbox=bbox, nprocs=64)"
      ],
      "language": "python",
      "metadata": {},
@@ -101,33 +101,38 @@
      "source": [
       "`load_uniform_grid` takes the following arguments and optional keywords:\n",
       "\n",
-      "* `data` : This is a dict of numpy arrays, where the keys are the field names.\n",
+      "* `data` : This is a dict of numpy arrays, where the keys are the field names\n",
       "* `domain_dimensions` : The domain dimensions of the unigrid\n",
-      "* `sim_unit_to_cm` : Conversion factor from simulation units to centimeters\n",
-      "* `bbox` : Size of computational domain in units sim_unit_to_cm\n",
+      "* `length_unit` : The unit that corresponds to `code_length`, can be a string, tuple, or floating-point number\n",
+      "* `bbox` : Size of computational domain in units of `code_length`\n",
       "* `nprocs` : If greater than 1, will create this number of subarrays out of data\n",
       "* `sim_time` : The simulation time in seconds\n",
+      "* `mass_unit` : The unit that corresponds to `code_mass`, can be a string, tuple, or floating-point number\n",
+      "* `time_unit` : The unit that corresponds to `code_time`, can be a string, tuple, or floating-point number\n",
+      "* `velocity_unit` : The unit that corresponds to `code_velocity`\n",
       "* `periodicity` : A tuple of booleans that determines whether the data will be treated as periodic along each axis\n",
       "\n",
-      "This example creates a `yt`-native parameter file `pf` that will treat your array as a\n",
-      "density field in cubic domain of 3 Mpc edge size (3 * 3.0856e24 cm) and\n",
-      "simultaneously divide the domain into `nprocs` = 64 chunks, so that you can take advantage\n",
-      "of the underlying parallelism. "
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The resulting `pf` functions exactly like a parameter file from any other dataset--it can be sliced, and we can show the grid boundaries:"
+      "This example creates a `yt`-native dataset `ds` that will treat your array as a\n",
+      "density field in cubic domain of 3 Mpc edge size and simultaneously divide the \n",
+      "domain into `nprocs` = 64 chunks, so that you can take advantage\n",
+      "of the underlying parallelism. \n",
+      "\n",
+      "The optional unit keyword arguments allow for the default units of the dataset to be set. They can be:\n",
+      "* A string, e.g. `length_unit=\"Mpc\"`\n",
+      "* A tuple, e.g. `mass_unit=(1.0e14, \"Msun\")`\n",
+      "* A floating-point value, e.g. `time_unit=3.1557e13`\n",
+      "\n",
+      "In the latter case, the unit is assumed to be cgs. \n",
+      "\n",
+      "The resulting `ds` functions exactly like any other dataset `yt` can handle--it can be sliced, and we can show the grid boundaries:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = SlicePlot(pf, 2, [\"Density\"])\n",
-      "slc.set_cmap(\"Density\", \"Blues\")\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"])\n",
+      "slc.set_cmap(\"density\", \"Blues\")\n",
       "slc.annotate_grids(cmap=None)\n",
       "slc.show()"
      ],
@@ -152,13 +157,14 @@
       "posx_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
       "posy_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
       "posz_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
-      "data = dict(Density = np.random.random(size=(64,64,64)), \n",
+      "data = dict(density = (np.random.random(size=(64,64,64)), \"Msun/kpc**3\"), \n",
       "            number_of_particles = 10000,\n",
-      "            particle_position_x = posx_arr, \n",
-      "\t        particle_position_y = posy_arr,\n",
-      "\t        particle_position_z = posz_arr)\n",
+      "            particle_position_x = (posx_arr, \"code_length\"), \n",
+      "\t        particle_position_y = (posy_arr, \"code_length\"),\n",
+      "\t        particle_position_z = (posz_arr, \"code_length\"))\n",
       "bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])\n",
-      "pf = load_uniform_grid(data, data[\"Density\"].shape, cm_per_mpc, bbox=bbox, nprocs=4)"
+      "ds = load_uniform_grid(data, data[\"density\"][0].shape, length_unit=(1.0, \"Mpc\"), mass_unit=(1.0,\"Msun\"), \n",
+      "                       bbox=bbox, nprocs=4)"
      ],
      "language": "python",
      "metadata": {},
@@ -176,8 +182,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = SlicePlot(pf, \"z\", [\"Density\"])\n",
-      "slc.set_cmap(\"Density\", \"Blues\")\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"])\n",
+      "slc.set_cmap(\"density\", \"Blues\")\n",
       "slc.annotate_particles(0.25, p_size=12.0, col=\"Red\")\n",
       "slc.show()"
      ],
@@ -207,6 +213,7 @@
       "import h5py\n",
       "from yt.config import ytcfg\n",
       "data_dir = ytcfg.get('yt','test_data_dir')\n",
+      "from yt.utilities.physical_ratios import cm_per_kpc\n",
       "f = h5py.File(data_dir+\"/UnigridData/turb_vels.h5\", \"r\") # Read-only access to the file"
      ],
      "language": "python",
@@ -234,16 +241,44 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We can iterate over the items in the file handle to get the data into a dictionary, which we will then load:"
+      "We need to add some unit information. It may be stored in the file somewhere, or we may know it from another source. In this case, the units are simply cgs:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "data = {k:v for k,v in f.items()}\n",
-      "bbox = np.array([[-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5]])\n",
-      "pf = load_uniform_grid(data, data[\"Density\"].shape, 250.*cm_per_kpc, bbox=bbox, nprocs=8, periodicity=(False,False,False))"
+      "units = [\"gauss\",\"gauss\",\"gauss\", \"g/cm**3\", \"erg/cm**3\", \"K\", \n",
+      "         \"cm/s\", \"cm/s\", \"cm/s\", \"cm/s\", \"cm/s\", \"cm/s\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can iterate over the items in the file handle and the units to get the data into a dictionary, which we will then load:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "data = {k:(v.value,u) for (k,v), u in zip(f.items(),units)}\n",
+      "bbox = np.array([[-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5]])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds = load_uniform_grid(data, data[\"Density\"][0].shape, length_unit=250.*cm_per_kpc, bbox=bbox, nprocs=8, \n",
+      "                       periodicity=(False,False,False))"
      ],
      "language": "python",
      "metadata": {},
@@ -260,7 +295,9 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prj = ProjectionPlot(pf, \"z\", [\"z-velocity\",\"Temperature\"], weight_field=\"Density\")\n",
+      "prj = ProjectionPlot(ds, \"z\", [\"z-velocity\",\"Temperature\",\"Bx\"], weight_field=\"Density\")\n",
+      "prj.set_log(\"z-velocity\", False)\n",
+      "prj.set_log(\"Bx\", False)\n",
       "prj.show()"
      ],
      "language": "python",
@@ -287,10 +324,10 @@
      "collapsed": false,
      "input": [
       "#Find the min and max of the field\n",
-      "mi, ma = pf.h.all_data().quantities[\"Extrema\"]('temperature')[0]\n",
+      "mi, ma = ds.all_data().quantities[\"Extrema\"]('Temperature')\n",
       "#Reduce the dynamic range\n",
-      "mi += 1.5e7\n",
-      "ma -= 0.81e7"
+      "mi = mi.value + 1.5e7\n",
+      "ma = ma.value - 0.81e7"
      ],
      "language": "python",
      "metadata": {},
@@ -327,9 +364,9 @@
       "# Choose a vector representing the viewing direction.\n",
       "L = [0.5, 0.5, 0.5]\n",
       "# Define the center of the camera to be the domain center\n",
-      "c = pf.domain_center\n",
+      "c = ds.domain_center[0]\n",
       "# Define the width of the image\n",
-      "W = 1.5*pf.domain_width\n",
+      "W = 1.5*ds.domain_width[0]\n",
       "# Define the number of pixels to render\n",
       "Npixels = 512 "
      ],
@@ -348,9 +385,9 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "cam = pf.h.camera(c, L, W, Npixels, tf, fields=['temperature'],\n",
-      "                  north_vector=[0,0,1], steady_north=True, \n",
-      "                  sub_samples=5, no_ghost=False,log_fields=False)\n",
+      "cam = ds.camera(c, L, W, Npixels, tf, fields=['Temperature'],\n",
+      "                north_vector=[0,0,1], steady_north=True, \n",
+      "                sub_samples=5, log_fields=[False])\n",
       "\n",
       "cam.transfer_function.map_to_colormap(mi,ma, \n",
       "                                      scale=15.0, colormap='algae')"
@@ -417,14 +454,17 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We can put it into a dictionary in the same way as before, but we slice the file handle `f` so that we don't use the `PrimaryHDU`. `hdu.name` is the field name and `hdu.data` is the actual data. We can check that we got the correct fields. "
+      "We can put it into a dictionary in the same way as before, but we slice the file handle `f` so that we don't use the `PrimaryHDU`. `hdu.name` is the field name and `hdu.data` is the actual data. Each of these velocity fields is in km/s. We can check that we got the correct fields. "
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "data = {hdu.name.lower():hdu.data for hdu in f[1:]}\n",
+      "data = {}\n",
+      "for hdu in f[1:]:\n",
+      "    name = hdu.name.lower()\n",
+      "    data[name] = (hdu.data,\"km/s\")\n",
       "print data.keys()"
      ],
      "language": "python",
@@ -435,15 +475,36 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Now we load the data into `yt`. This particular file doesn't have any coordinate information, but let's assume that the box size is a Mpc. Since these are velocity fields, we can overlay velocity vectors on slices, just as if we had loaded in data from a supported code. "
+      "The velocity field names in this case are slightly different than the standard `yt` field names for velocity fields, so we will reassign the field names:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load_uniform_grid(data, data[\"x-velocity\"].shape, cm_per_mpc)\n",
-      "slc = SlicePlot(pf, \"x\", [\"x-velocity\",\"y-velocity\",\"z-velocity\"])\n",
+      "data[\"velocity_x\"] = data.pop(\"x-velocity\")\n",
+      "data[\"velocity_y\"] = data.pop(\"y-velocity\")\n",
+      "data[\"velocity_z\"] = data.pop(\"z-velocity\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now we load the data into `yt`. Let's assume that the box size is a Mpc. Since these are velocity fields, we can overlay velocity vectors on slices, just as if we had loaded in data from a supported code. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds = load_uniform_grid(data, data[\"velocity_x\"][0].shape, length_unit=(1.0,\"Mpc\"))\n",
+      "slc = SlicePlot(ds, \"x\", [\"velocity_x\",\"velocity_y\",\"velocity_z\"])\n",
+      "for ax in \"xyz\":\n",
+      "    slc.set_log(\"velocity_%s\" % (ax), False)\n",
       "slc.annotate_velocity()\n",
       "slc.show()"
      ],
@@ -472,7 +533,7 @@
      "input": [
       "grid_data = [\n",
       "    dict(left_edge = [0.0, 0.0, 0.0],\n",
-      "         right_edge = [1.0, 1.0, 1.],\n",
+      "         right_edge = [1.0, 1.0, 1.0],\n",
       "         level = 0,\n",
       "         dimensions = [32, 32, 32]), \n",
       "    dict(left_edge = [0.25, 0.25, 0.25],\n",
@@ -496,7 +557,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "for g in grid_data: g[\"Density\"] = np.random.random(g[\"dimensions\"]) * 2**g[\"level\"]"
+      "for g in grid_data: g[\"density\"] = np.random.random(g[\"dimensions\"]) * 2**g[\"level\"]"
      ],
      "language": "python",
      "metadata": {},
@@ -516,7 +577,7 @@
      "input": [
       "grid_data[0][\"number_of_particles\"] = 0 # Set no particles in the top-level grid\n",
       "grid_data[0][\"particle_position_x\"] = np.array([]) # No particles, so set empty arrays\n",
-      "grid_data[0][\"particle_position_y\"] = np.array([]) \n",
+      "grid_data[0][\"particle_position_y\"] = np.array([])\n",
       "grid_data[0][\"particle_position_z\"] = np.array([])\n",
       "grid_data[1][\"number_of_particles\"] = 1000\n",
       "grid_data[1][\"particle_position_x\"] = np.random.uniform(low=0.25, high=0.75, size=1000)\n",
@@ -531,6 +592,26 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
+      "We need to specify the field units in a `field_units` dict:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "field_units = {\"density\":\"code_mass/code_length**3\",\n",
+      "               \"particle_position_x\":\"code_length\",\n",
+      "               \"particle_position_y\":\"code_length\",\n",
+      "               \"particle_position_z\":\"code_length\",}"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
       "Then, call `load_amr_grids`:"
      ]
     },
@@ -538,7 +619,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)"
+      "ds = load_amr_grids(grid_data, [32, 32, 32], field_units=field_units)"
      ],
      "language": "python",
      "metadata": {},
@@ -548,14 +629,14 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "`load_amr_grids` also takes the same keywords `bbox` and `sim_time` as `load_uniform_grid`. Let's take a slice:"
+      "`load_amr_grids` also takes the same keywords `bbox` and `sim_time` as `load_uniform_grid`. We could have also specified the length, time, velocity, and mass units in the same manner as before. Let's take a slice:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = SlicePlot(pf, \"z\", [\"Density\"])\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"])\n",
       "slc.annotate_particles(0.25, p_size=15.0, col=\"Pink\")\n",
       "slc.show()"
      ],
@@ -579,7 +660,7 @@
       "* Particles may be difficult to integrate.\n",
       "* Data must already reside in memory before loading it in to `yt`, whether it is generated at runtime or loaded from disk. \n",
       "* Some functions may behave oddly, and parallelism will be disappointing or non-existent in most cases.\n",
-      "* No consistency checks are performed on the index\n",
+      "* No consistency checks are performed on the hierarchy\n",
       "* Consistency between particle positions and grids is not checked; `load_amr_grids` assumes that particle positions associated with one grid are not bounded within another grid at a higher level, so this must be ensured by the user prior to loading the grid data. "
      ]
     }

diff -r bd41e2fe9b3a0fdf0ce87e9f812b236f205e3105 -r c0cac851f2f02687f1d2b3e0a978f3e426c17c50 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -363,6 +363,8 @@
         rv = self.field_info.find_dependencies(new_fields)
 
     def add_particle_filter(self, filter):
+        # This requires an index
+        self.index
         # This is a dummy, which we set up to enable passthrough of "all"
         # concatenation fields.
         n = getattr(filter, "name", filter)

diff -r bd41e2fe9b3a0fdf0ce87e9f812b236f205e3105 -r c0cac851f2f02687f1d2b3e0a978f3e426c17c50 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -1127,7 +1127,7 @@
         px, py = self.convert_to_plot(plot,
                     [reg[field_x][gg][::self.stride],
                      reg[field_y][gg][::self.stride]])
-        plot._axes.scatter(px, py, edgecolors='None', marker=self.marker,
+        plot._axes.scatter(px.ndarray_view(), py.ndarray_view(), edgecolors='None', marker=self.marker,
                            s=self.p_size, c=self.color,alpha=self.alpha)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
@@ -1141,8 +1141,8 @@
         zax = axis
         LE[xax], RE[xax] = xlim
         LE[yax], RE[yax] = ylim
-        LE[zax] = data.center[zax] - self.width*0.5
-        RE[zax] = data.center[zax] + self.width*0.5
+        LE[zax] = data.center[zax].ndarray_view() - self.width*0.5
+        RE[zax] = data.center[zax].ndarray_view() + self.width*0.5
         if self.region is not None \
             and np.all(self.region.left_edge <= LE) \
             and np.all(self.region.right_edge >= RE):


https://bitbucket.org/yt_analysis/yt/commits/48dbebf4dc86/
Changeset:   48dbebf4dc86
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-03-17 20:23:43
Summary:     Reverting the default figure size to 8 inches.
Affected #:  2 files

diff -r c0cac851f2f02687f1d2b3e0a978f3e426c17c50 -r 48dbebf4dc86b6bb6bd88accbca5c61e369f0e13 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -239,7 +239,7 @@
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True,
                  periodic=True, origin='center-window', oblique=False,
-                 window_size=6.0, fields=None, fontsize=18, setup=False):
+                 window_size=8.0, fields=None, fontsize=18, setup=False):
         if not hasattr(self, "pf"):
             self.pf = data_source.pf
             ts = self._initialize_dataset(self.pf)

diff -r c0cac851f2f02687f1d2b3e0a978f3e426c17c50 -r 48dbebf4dc86b6bb6bd88accbca5c61e369f0e13 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -557,7 +557,7 @@
         Default: "black"
     figure_size : int
         Size in inches of the image.
-        Default: 6 (6x6)
+        Default: 8 (8x8)
 
     Examples
     --------
@@ -586,7 +586,7 @@
     def __init__(self, data_source, x_field, y_field, z_fields,
                  weight_field="cell_mass", x_bins=128, y_bins=128,
                  accumulation=False, fractional=False,
-                 profile=None, fontsize=18, font_color="black", figure_size=6.0):
+                 profile=None, fontsize=18, font_color="black", figure_size=8.0):
         self.plot_title = {}
         self.z_log = {}
         self.z_title = {}


https://bitbucket.org/yt_analysis/yt/commits/853ec46f9c58/
Changeset:   853ec46f9c58
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-03-17 20:25:50
Summary:     Making use of setdefault.
Affected #:  1 file

diff -r 48dbebf4dc86b6bb6bd88accbca5c61e369f0e13 -r 853ec46f9c5825bebe5bcdca0b548fb6bc173ab9 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -321,10 +321,8 @@
             self._font_color = font_dict.pop('color')
         # Set default values if the user does not explicitly set them.
         # this prevents reverting to the matplotlib defaults.
-        if 'family' not in font_dict:
-            font_dict['family'] = 'stixgeneral'
-        if 'size' not in font_dict:
-            font_dict['size'] = 18
+        font_dict.setdefault('family', 'stixgeneral')
+        font_dict.setdefault('size', 18)
         self._font_properties = \
             FontProperties(**font_dict)
         return self

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list