[yt-svn] commit/yt: 6 new changesets

commits-noreply at bitbucket.org
Tue Sep 23 10:00:00 PDT 2014


6 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/aa847dcf25e4/
Changeset:   aa847dcf25e4
Branch:      yt
User:        atmyers
Date:        2014-09-03 04:06:25+00:00
Summary:     adding particle plot stuff back in, on a different head
Affected #:  3 files

diff -r aa8a1f105a524391301a33342a3fa01918c7da44 -r aa847dcf25e44f5aecc68949b8a3af6144b89707 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -143,14 +143,14 @@
     apply_colormap, scale_image, write_projection, \
     SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
     ProjectionPlot, OffAxisProjectionPlot, \
-    show_colormaps, ProfilePlot, PhasePlot
+    show_colormaps, ProfilePlot, PhasePlot, ParticlePlot
 
 from yt.visualization.volume_rendering.api import \
     off_axis_projection, ColorTransferFunction, \
     TransferFunctionHelper
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    parallel_objects, enable_parallelism, communication_system
+    parallel_objects, enable_parallelism
 
 from yt.convenience import \
     load, simulation

diff -r aa8a1f105a524391301a33342a3fa01918c7da44 -r aa847dcf25e44f5aecc68949b8a3af6144b89707 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -52,6 +52,9 @@
     ProfilePlot, \
     PhasePlot
 
+from .particle_plotter import \
+    ParticlePlot
+    
 from .base_plot_types import \
     get_multi_plot
 

diff -r aa8a1f105a524391301a33342a3fa01918c7da44 -r aa847dcf25e44f5aecc68949b8a3af6144b89707 yt/visualization/particle_plotter.py
--- /dev/null
+++ b/yt/visualization/particle_plotter.py
@@ -0,0 +1,370 @@
+"""
+This is a simple mechanism for interfacing with Particle plots
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+
+import __builtin__
+import base64
+import types
+
+from functools import wraps
+from itertools import izip
+import matplotlib
+import numpy as np
+import cStringIO
+
+from yt.utilities.exceptions import \
+    YTNotInsideNotebook
+from yt.utilities.logger import ytLogger as mylog
+import _mpl_imports as mpl
+from yt.funcs import \
+    ensure_list, \
+    get_image_suffix, \
+    get_ipython_api_version
+from yt.units.unit_object import Unit
+from .profile_plotter import \
+    get_canvas, \
+    invalidate_plot, \
+    sanitize_label
+
+class ParticlePlot(object):
+    r"""
+    Create a particle scatter plot from a data source.
+
+    Given a data object (all_data, region, sphere, etc.), an x field, 
+    and a y field (both of particle type), this will create a scatter
+    plot with one marker for each particle.
+
+    Parameters
+    ----------
+    data_source : AMR3DData Object
+        The data object to be plotted, such as all_data, region, or 
+        sphere.
+    x_field : str
+        The field to plot on the x-axis.
+    y_field : str
+        The field to plot on the y-axis.
+    label : str
+        The label to use in the plot legend.  Default: None.
+    plot_spec : dict or list of dicts
+        A dictionary or list of dictionaries containing plot keyword 
+        arguments.  This will be passed to pyplot.plot. 
+        For example, dict(c='r', marker='.').
+        Default: dict(c='b', marker='.', linestyle='None', markersize=8)
+
+    Examples
+    --------
+
+    >>> import yt
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>> ad = ds.all_data()
+    >>> plot = yt.ParticlePlot(ad, 'particle_position_x', 'particle_velocity_x')
+    >>> plot.save()
+
+    Use ``set_line_property`` to change line properties.
+    
+    """
+    x_log = None
+    y_log = None
+    z_log = None
+    x_title = None
+    y_title = None
+    x_lim = (None, None)
+    y_lim = (None, None)
+    _plot_valid = False
+
+    def __init__(self, data_source, x_field, y_field,
+                 label=None, plot_spec=None):
+
+        if plot_spec is None:
+            plot_spec = {'c':'b', 'marker':'.', 'linestyle':'None', 'markersize':8}
+
+        self.data_source = data_source
+        self.x_field = x_field
+        self.y_field = y_field
+        self.label = sanitize_label(label, 1)
+        self.plot_spec = plot_spec
+
+        self.x_data = self.data_source[x_field]
+        self.y_data = self.data_source[y_field]
+        
+        self.figure = mpl.matplotlib.figure.Figure((10, 8))
+        self.axis = self.figure.add_subplot(111)
+        self._setup_plots()
+
+    def save(self, name=None):
+        r"""
+        Saves the scatter plot to disk.
+
+        Parameters
+        ----------
+        name : str
+            The output file keyword.
+
+        """
+        if not self._plot_valid:
+            self._setup_plots()
+        if name is None:
+            prefix = self.data_source.ds
+            name = "%s.png" % prefix
+        suffix = get_image_suffix(name)
+        prefix = name[:name.rfind(suffix)]
+        xfn = self.x_field
+        if isinstance(xfn, types.TupleType):
+            xfn = xfn[1]
+        yfn = self.y_field
+        if isinstance(yfn, types.TupleType):
+            yfn = yfn[1]
+        if not suffix:
+            suffix = ".png"
+        canvas_cls = get_canvas(name)
+        canvas = canvas_cls(self.figure)
+        fn = "%s_ScatterPlot_%s_%s%s" % (prefix, xfn, yfn, suffix)
+        mylog.info("Saving %s", fn)
+        canvas.print_figure(fn)
+        return fn
+
+    def show(self):
+        r"""This will send the plot to the IPython notebook.
+
+        If yt is being run from within an IPython session, and it is able to
+        determine this, this function will send the plot to the
+        notebook for display.
+
+        If yt can't determine if it's inside an IPython session, it will raise
+        YTNotInsideNotebook.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> pp = ParticlePlot(ds.all_data(), 'particle_position_x', 'particle_position_y')
+        >>> pp.show()
+
+        """
+        if "__IPYTHON__" in dir(__builtin__):
+            api_version = get_ipython_api_version()
+            if api_version in ('0.10', '0.11'):
+                self._send_zmq()
+            else:
+                from IPython.display import display
+                display(self)
+        else:
+            raise YTNotInsideNotebook
+
+    def _repr_html_(self):
+        """Return an html representation of the plot object.  Will
+        display as a png image."""
+        ret = ''
+        canvas = mpl.FigureCanvasAgg(self.figure)
+        f = cStringIO.StringIO()
+        canvas.print_figure(f)
+        f.seek(0)
+        img = base64.b64encode(f.read())
+        ret += '<img src="data:image/png;base64,%s"><br>' % img
+        return ret
+
+    def _setup_plots(self):
+        self.axis.cla()
+        self.axis.plot(np.array(self.x_data), np.array(self.y_data),
+                       label=self.label, **self.plot_spec)
+
+        xscale, yscale = self._get_axis_log()
+        xtitle, ytitle = self._get_axis_titles()
+
+        self.axis.set_xscale(xscale)
+        self.axis.set_yscale(yscale)
+
+        self.axis.set_xlabel(xtitle)
+        self.axis.set_ylabel(ytitle)
+
+        self.axis.set_xlim(*self.x_lim)
+        self.axis.set_ylim(*self.y_lim)
+
+        if any(self.label):
+            self.axis.legend(loc="best")
+
+        self._plot_valid = True
+
+    @invalidate_plot
+    def set_line_property(self, property, value):
+        r"""
+        Set properties for the line on the plot.
+
+        Parameters
+        ----------
+        property : str
+            The line property to be set.
+        value : str, int, float
+            The value to set for the line property.
+
+        Examples
+        --------
+
+        >>> plot.set_line_property("marker", "+")
+
+        
+        """
+        specs = self.plot_spec
+        specs[property] = value
+        return self
+
+    @invalidate_plot
+    def set_xlog(self, log):
+        """Set the x-axis to log or linear.
+
+        Parameters
+        ----------
+
+        log : boolean
+            Log on/off.
+        """
+        self.x_log = log
+        return self
+
+    @invalidate_plot
+    def set_ylog(self, log):
+        """Set the y-axis to log or linear.
+
+        Parameters
+        ----------
+
+        log : boolean
+            Log on/off.
+        """
+        self.y_log = log
+        return self
+    
+
+    @invalidate_plot
+    def set_unit(self, field, unit):
+        """Sets a new unit for the requested field
+
+        Parameters
+        ----------
+        field : string
+           The name of the field that is to be changed.
+
+        unit : string or Unit object
+           The name of the new unit.
+        """
+        if field == self.x_field:
+            self.x_data.convert_to_units(unit)
+        elif field == self.y_field:
+            self.y_data.convert_to_units(unit)
+        else:
+            raise KeyError("Field %s not in the plot!" % (field))
+        return self
+
+    @invalidate_plot
+    def set_xlim(self, xmin=None, xmax=None):
+        """Sets the limits for the x-axis of the plot.
+
+        Parameters
+        ----------
+        
+        xmin : float or None
+          The new x minimum.  Defaults to None, which leaves the xmin
+          unchanged.
+
+        xmax : float or None
+          The new x maximum.  Defaults to None, which leaves the xmax
+          unchanged.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> pp = yt.ParticlePlot(ds.all_data(), 'particle_position_x', 'particle_velocity_x')
+        >>> pp.set_xlim(0.1, 0.9)
+        >>> pp.save()
+
+        """
+        self.x_lim = (xmin, xmax)
+        return self
+
+    @invalidate_plot
+    def set_ylim(self, ymin=None, ymax=None):
+        """Sets the limits for the y-axis of the plot.
+
+        Parameters
+        ----------
+
+        ymin : float or None
+          The new y minimum.  Defaults to None, which leaves the ymin
+          unchanged.
+
+        ymax : float or None
+          The new y maximum.  Defaults to None, which leaves the ymax
+          unchanged.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> pp = yt.ParticlePlot(ds.all_data(), 'particle_position_x', 'particle_velocity_x')
+        >>> pp.set_ylim(1e1, 1e8)
+        >>> pp.save()
+
+        """
+        self.y_lim = (ymin, ymax)
+        return self
+
+    def _get_axis_log(self):
+
+        xf, = self.data_source._determine_fields([self.x_field])
+        xfi = self.data_source.ds._get_field_info(*xf)
+        if self.x_log is None:
+            x_log = xfi.take_log
+        else:
+            x_log = self.x_log
+
+        yf, = self.data_source._determine_fields([self.y_field])
+        yfi = self.data_source.ds._get_field_info(*yf)
+        if self.y_log is None:
+            y_log = yfi.take_log
+        else:
+            y_log = self.y_log
+        
+        scales = {True: 'log', False: 'linear'}
+        return scales[x_log], scales[y_log]
+
+    def _get_field_label(self, field, field_info, field_unit):
+        field_unit = field_unit.latex_representation()
+        field_name = field_info.display_name
+        if isinstance(field, tuple): field = field[1]
+        if field_name is None:
+            # replace underscores with LaTeX spacing and title-case the name
+            field_name = r'$\rm{'+field.replace('_','\/').title()+r'}$'
+        elif field_name.find('$') == -1:
+            field_name = field_name.replace(' ','\/')
+            field_name = r'$\rm{'+field_name+r'}$'
+        if field_unit is None or field_unit == '' or field_unit == '1':
+            label = field_name
+        else:
+            label = field_name+r'$\/\/('+field_unit+r')$'
+        return label
+
+    def _get_axis_titles(self):
+
+        xfi = self.data_source.ds._get_field_info(self.x_field)
+        x_unit = Unit(self.x_data.units, registry=self.data_source.ds.unit_registry)
+        x_title = self._get_field_label(self.x_field, xfi, x_unit)
+
+        yfi = self.data_source.ds._get_field_info(self.y_field)
+        y_unit = Unit(self.y_data.units, registry=self.data_source.ds.unit_registry)
+        y_title = self._get_field_label(self.y_field, yfi, y_unit)
+
+        return (x_title, y_title)
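
A minimal sketch of driving the new class end to end, assuming the
IsolatedGalaxy sample dataset used in the docstrings above; note that
ParticlePlot is importable from the top-level yt namespace per the
yt/__init__.py change above, and that each set_* method invalidates and
returns the plot, so calls can be chained:

    import yt

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    ad = ds.all_data()

    # one marker per particle: position on x, velocity on y
    plot = yt.ParticlePlot(ad, 'particle_position_x', 'particle_velocity_x')

    # restyle and rescale; each call marks the plot invalid until redrawn
    plot.set_unit('particle_position_x', 'Mpc')
    plot.set_line_property('marker', '+')
    plot.set_xlog(False)

    # saved as <prefix>_ScatterPlot_<x_field>_<y_field>.png
    plot.save('galaxy.png')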


https://bitbucket.org/yt_analysis/yt/commits/f1bd4a3a0527/
Changeset:   f1bd4a3a0527
Branch:      yt
User:        atmyers
Date:        2014-09-21 21:48:32+00:00
Summary:     Merged yt_analysis/yt into yt
Affected #:  112 files

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -129,7 +129,14 @@
 are center_of_mass and bulk_velocity. Their definitions are available in 
 ``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that 
 your quantity may be of use to the general community, add it to 
-``halo_quantities.py`` and issue a pull request.
+``halo_quantities.py`` and issue a pull request.  Default halo quantities are:
+
+* ``particle_identifier`` -- Halo ID (e.g. 0 to N)
+* ``particle_mass`` -- Mass of halo
+* ``particle_position_x`` -- Location of halo along x axis
+* ``particle_position_y`` -- Location of halo along y axis
+* ``particle_position_z`` -- Location of halo along z axis
+* ``virial_radius`` -- Virial radius of halo
 
 An example of adding a quantity:
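
The example itself lies outside this hunk.  For context, a minimal
sketch of what such an addition looks like, assuming the add_quantity
registry exposed by yt.analysis_modules.halo_analysis.api and a made-up
quantity name; quantity callables receive the halo object and can read
stored values from halo.quantities, as the halo_callbacks code later in
this digest does:

    from yt.analysis_modules.halo_analysis.api import add_quantity

    # hypothetical quantity: half the virial radius, derived from a
    # quantity the catalog already carries
    def _half_virial_radius(halo):
        return 0.5 * halo.quantities['virial_radius']

    add_quantity('half_virial_radius', _half_virial_radius)

    # then, on an existing HaloCatalog instance:
    #     hc.add_quantity('half_virial_radius')
    #     hc.create()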
 

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -75,7 +75,8 @@
   mass. In simulations where the highest-resolution particles all have the 
   same mass (ie: zoom-in grid based simulations), one can set up a particle
   filter to select the lowest mass particles and perform the halo finding
-  only on those.
+  only on those.  See this cookbook recipe for an example: 
+  :ref:`cookbook-rockstar-nested-grid`.
 
 To run the Rockstar Halo finding, you must launch python with MPI and 
 parallelization enabled. While Rockstar itself does not require MPI to run, 

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -263,7 +263,7 @@
 
    ds = load("my_data")
    sp = ds.sphere('c', (10, 'kpc'))
-   print ad.quantities.angular_momentum_vector()
+   print sp.quantities.angular_momentum_vector()
 
 Available Derived Quantities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -14,6 +14,22 @@
 
 .. yt_cookbook:: halo_plotting.py
 
+.. _cookbook-rockstar-nested-grid:
+
+Running Rockstar to Find Halos on Multi-Resolution-Particle Datasets
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The version of Rockstar installed with yt does not have the capability
+to work on datasets with particles of different masses.  Unfortunately,
+many simulations possess particles of different masses, notably cosmological 
+zoom datasets.  This recipe uses Rockstar in two different ways to generate a 
+HaloCatalog from the highest resolution dark matter particles (the ones 
+inside the zoom region).  It then overlays some of those halos on a projection
+as a demonstration.  See :ref:`halo-analysis` and :ref:`annotate-halos` for
+more information.
+
+.. yt_cookbook:: rockstar_nest.py
+
 .. _cookbook-halo_finding:
 
 Halo Profiling and Custom Analysis

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 doc/source/cookbook/power_spectrum_example.py
--- a/doc/source/cookbook/power_spectrum_example.py
+++ b/doc/source/cookbook/power_spectrum_example.py
@@ -57,7 +57,7 @@
     
     # physical limits to the wavenumbers
     kmin = np.min(1.0/L)
-    kmax = np.max(0.5*dims/L)
+    kmax = np.min(0.5*dims/L)
     
     kbins = np.arange(kmin, kmax, kmin)
     N = len(kbins)
@@ -112,7 +112,6 @@
     return np.abs(ru)**2
 
 
-if __name__ == "__main__":
 
-    ds = yt.load("maestro_xrb_lores_23437")
-    doit(ds)
+ds = yt.load("maestro_xrb_lores_23437")
+doit(ds)

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 doc/source/cookbook/rockstar_nest.py
--- /dev/null
+++ b/doc/source/cookbook/rockstar_nest.py
@@ -0,0 +1,74 @@
+# You must run this job in parallel.  
+# Several MPI flags can be useful in getting it to work correctly.
+# It requires at least 3 processors in order to run because of the way in which 
+# rockstar divides up the work.  Make sure you have mpi4py installed as per 
+# http://yt-project.org/docs/dev/analyzing/parallel_computation.html#setting-up-parallel-yt
+    
+# Usage: mpirun -np <num_procs> --mca btl ^openib python this_script.py
+
+import yt
+from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog
+from yt.data_objects.particle_filters import add_particle_filter
+from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
+yt.enable_parallelism() # rockstar halofinding requires parallelism
+
+# Create a dark matter particle filter
+# This will be code dependent, but this function here is true for enzo
+
+def DarkMatter(pfilter, data):
+    filter = data[("all", "particle_type")] == 1 # DM = 1, Stars = 2
+    return filter
+
+add_particle_filter("dark_matter", function=DarkMatter, filtered_type='all', \
+                    requires=["particle_type"])
+
+# First, we make sure that this script is being run using mpirun with
+# at least 3 processors as indicated in the comments above.
+assert(yt.communication_system.communicators[-1].size >= 3)
+
+# Load the dataset and apply dark matter filter
+fn = "Enzo_64/DD0043/data0043"
+ds = yt.load(fn)
+ds.add_particle_filter('dark_matter')
+
+# Determine highest resolution DM particle mass in sim by looking
+# at the extrema of the dark_matter particle_mass field.
+ad = ds.all_data()
+min_dm_mass = ad.quantities.extrema(('dark_matter','particle_mass'))[0]
+
+# Define a new particle filter to isolate all highest resolution DM particles
+# and apply it to dataset
+def MaxResDarkMatter(pfilter, data):
+    return data["particle_mass"] <= 1.01 * min_dm_mass
+
+add_particle_filter("max_res_dark_matter", function=MaxResDarkMatter, \
+                    filtered_type='dark_matter', requires=["particle_mass"])
+ds.add_particle_filter('max_res_dark_matter')
+
+# If desired, we can see the total number of DM and High-res DM particles
+#if yt.is_root():
+#    print "Simulation has %d DM particles." % ad['dark_matter','particle_type'].shape
+#    print "Simulation has %d Highest Res DM particles." % ad['max_res_dark_matter', 'particle_type'].shape
+
+# Run the halo catalog on the dataset only on the highest resolution dark matter 
+# particles
+hc = HaloCatalog(data_ds=ds, finder_method='rockstar', \
+                 finder_kwargs={'dm_only':True, 'particle_type':'max_res_dark_matter'})
+hc.create()
+
+# Or alternatively, just run the RockstarHaloFinder and later import the 
+# output file as necessary.  You can skip this step if you've already run it
+# once, but be careful since subsequent halo finds will overwrite this data.
+#rhf = RockstarHaloFinder(ds, particle_type="max_res_dark_matter")
+#rhf.run()
+# Load the halo list from a rockstar output for this dataset
+# Create a projection with the halos overplot on top
+#halos = yt.load('rockstar_halos/halos_0.0.bin')
+#hc = HaloCatalog(halos_ds=halos)
+#hc.load()
+
+# Regardless of your method of creating the halo catalog, use it to overplot the
+# halos on a projection.
+p = yt.ProjectionPlot(ds, "x", "density")
+p.annotate_halos(hc, annotate_field = 'particle_identifier', width=(10,'Mpc'), factor=2)
+p.save()

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 doc/source/cookbook/tests/test_cookbook.py
--- /dev/null
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+"""Module for cookbook testing
+
+
+This test should be run from main yt directory.
+
+Example:
+
+      $ sed -e '/where/d' -i nose.cfg setup.cfg
+      $ nosetests doc/source/cookbook/tests/test_cookbook.py -P -v
+"""
+import glob
+import os
+import sys
+
+sys.path.append(os.path.join(os.getcwd(), "doc/source/cookbook"))
+
+
+def test_recipe():
+    '''Dummy test grabbing all cookbook's recipes'''
+    for fname in glob.glob("doc/source/cookbook/*.py"):
+        module_name = os.path.splitext(os.path.basename(fname))[0]
+        yield check_recipe, module_name
+
+
+def check_recipe(module_name):
+    '''Run single recipe'''
+    __import__(module_name)
+    assert True

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 doc/source/cookbook/thin_slice_projection.py
--- a/doc/source/cookbook/thin_slice_projection.py
+++ b/doc/source/cookbook/thin_slice_projection.py
@@ -4,7 +4,7 @@
 ds = yt.load("Enzo_64/DD0030/data0030")
 
 # Make a projection that is the full width of the domain,
-# but only 10 Mpc in depth.  This is done by creating a
+# but only 5 Mpc in depth.  This is done by creating a
 # region object with this exact geometry and providing it
 # as a data_source for the projection.
 
@@ -17,12 +17,12 @@
 right_corner = ds.domain_right_edge
 
 # Now adjust the size of the region along the line of sight (x axis).
-depth = ds.quan(10.0,'Mpc')
+depth = ds.quan(5.0,'Mpc')
 left_corner[0] = center[0] - 0.5 * depth
-left_corner[0] = center[0] + 0.5 * depth
+right_corner[0] = center[0] + 0.5 * depth
 
 # Create the region
-region = ds.region(center, left_corner, right_corner)
+region = ds.box(left_corner, right_corner)
 
 # Create a density projection and supply the region we have just created.
 # Only cells within the region will be included in the projection.

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 doc/source/examining/Loading_Generic_Particle_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Particle_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Particle_Data.ipynb
@@ -74,7 +74,7 @@
       "import yt\n",
       "from yt.units import parsec, Msun\n",
       "\n",
-      "bbox = 1.1*np.array([[min(ppx), max(ppx)], [min(ppy), max(ppy)], [min(ppy), max(ppy)]])\n",
+      "bbox = 1.1*np.array([[min(ppx), max(ppx)], [min(ppy), max(ppy)], [min(ppz), max(ppz)]])\n",
       "\n",
       "ds = yt.load_particles(data, length_unit=parsec, mass_unit=1e8*Msun, n_ref=256, bbox=bbox)"
      ],

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 doc/source/examining/Loading_Spherical_Data.ipynb
--- /dev/null
+++ b/doc/source/examining/Loading_Spherical_Data.ipynb
@@ -0,0 +1,188 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:88ed88ce8d8f4a359052f287aea17a7cbed435ff960e195097b440191ce6c2ab"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Loading Spherical Data\n",
+      "\n",
+      "With version 3.0 of yt, it has gained the ability to load data from non-Cartesian systems.  This support is still being extended, but here is an example of how to load spherical data from a regularly-spaced grid.  For irregularly spaced grids, a similar setup can be used, but the `load_hexahedral_mesh` method will have to be used instead.\n",
+      "\n",
+      "Note that in yt, \"spherical\" means that it is ordered $r$, $\\theta$, $\\phi$, where $\\theta$ is the declination from the azimuth (running from $0$ to $\\pi$) and $\\phi$ is the angle around the zenith (running from $0$ to $2\\pi$).\n",
+      "\n",
+      "We first start out by loading yt."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import numpy as np\n",
+      "import yt"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now, we create a few derived fields.  The first three are just straight translations of the Cartesian coordinates, so that we can see where we are located in the data, and understand what we're seeing.  The final one is just a fun field that is some combination of the three coordinates, and will vary in all dimensions."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "@yt.derived_field(name = \"sphx\", units = \"cm\", take_log=False)\n",
+      "def sphx(field, data):\n",
+      "    return np.cos(data[\"phi\"]) * np.sin(data[\"theta\"])*data[\"r\"]\n",
+      "@yt.derived_field(name = \"sphy\", units = \"cm\", take_log=False)\n",
+      "def sphy(field, data):\n",
+      "    return np.sin(data[\"phi\"]) * np.sin(data[\"theta\"])*data[\"r\"]\n",
+      "@yt.derived_field(name = \"sphz\", units = \"cm\", take_log=False)\n",
+      "def sphz(field, data):\n",
+      "    return np.cos(data[\"theta\"])*data[\"r\"]\n",
+      "@yt.derived_field(name = \"funfield\", units=\"cm\", take_log=False)\n",
+      "def funfield(field, data):\n",
+      "    return (np.sin(data[\"phi\"])**2 + np.cos(data[\"theta\"])**2) * (1.0*data[\"r\"].uq+data[\"r\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Loading Data\n",
+      "\n",
+      "Now we can actually load our data.  We use the `load_uniform_grid` function here.  Normally, the first argument would be a dictionary of field data, where the keys were the field names and the values the field data arrays.  Here, we're just going to look at derived fields, so we supply an empty one.\n",
+      "\n",
+      "The next few arguments are the number of dimensions, the bounds, and we then specify the geometry as spherical."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds = yt.load_uniform_grid({}, [128, 128, 128],\n",
+      "                          bbox=np.array([[0.0, 1.0], [0.0, np.pi], [0.0, 2*np.pi]]),\n",
+      "                          geometry=\"spherical\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Looking at Data\n",
+      "\n",
+      "Now we can take slices.  The first thing we will try is making a slice of data along the \"phi\" axis, here $\\pi/2$, which will be along the y axis in the positive direction.  We use the `.slice` attribute, which creates a slice, and then we convert this into a plot window.  Note that here 2 is used to indicate the third axis (0-indexed) which for spherical data is $\\phi$.\n",
+      "\n",
+      "This is the manual way of creating a plot -- below, we'll use the standard, automatic ways.  Note that the coordinates run from $-r$ to $r$ along the $z$ axis and from $0$ to $r$ along the $R$ axis.  We use the capital $R$ to indicate that it's the $R$ along the $x-y$ plane."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s = ds.slice(2, np.pi/2)\n",
+      "p = s.to_pw(\"funfield\", origin=\"native\")\n",
+      "p.set_zlim(\"all\", 0.0, 4.0)\n",
+      "p.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also slice along $r$.  For now, this creates a regular grid with *incorrect* units for phi and theta.  We are currently exploring two other options -- a simple aitoff projection, and fixing it to use the correct units as-is."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s = yt.SlicePlot(ds, \"r\", \"funfield\")\n",
+      "s.set_zlim(\"all\", 0.0, 4.0)\n",
+      "s.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also slice at constant $\\theta$.  But, this is a weird thing!  We're slicing at a constant declination from the azimuth.  What this means is that when thought of in a Cartesian domain, this slice is actually a cone.  The axes have been labeled appropriately, to indicate that these are not exactly the $x$ and $y$ axes, but instead differ by a factor of $\\sin(\\theta)$."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s = yt.SlicePlot(ds, \"theta\", \"funfield\")\n",
+      "s.set_zlim(\"all\", 0.0, 4.0)\n",
+      "s.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We've seen lots of the `funfield` plots, but we can also look at the Cartesian axes.  This next plot plots the Cartesian $x$, $y$ and $z$ values on a $\\theta$ slice.  Because we're not supplying an argument to the `center` parameter, yt will place it at the center of the $\\theta$ axis, which will be at $\\pi/2$, where it will be aligned with the $x-y$ plane.  The slight change in `sphz` results from the cells themselves migrating, and plotting the center of those cells."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s = yt.SlicePlot(ds, \"theta\", [\"sphx\", \"sphy\", \"sphz\"])\n",
+      "s.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can do the same with the $\\phi$ axis."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": true,
+     "input": [
+      "s = yt.SlicePlot(ds, \"phi\", [\"sphx\", \"sphy\", \"sphz\"])\n",
+      "s.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 doc/source/examining/index.rst
--- a/doc/source/examining/index.rst
+++ b/doc/source/examining/index.rst
@@ -9,4 +9,5 @@
    loading_data
    generic_array_data
    generic_particle_data
+   spherical_data
    low_level_inspection

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 doc/source/examining/spherical_data.rst
--- /dev/null
+++ b/doc/source/examining/spherical_data.rst
@@ -0,0 +1,6 @@
+.. _loading-spherical-data:
+
+Loading Spherical Data
+======================
+
+.. notebook:: Loading_Spherical_Data.ipynb

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -24,7 +24,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Castro                |     Y      |     Y     |   Partial  |   Y   |    Y     |    Y     |     N      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Chombo                |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      | Partial  |
+| Chombo                |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Enzo                  |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -151,19 +151,28 @@
 Overplot Halo Annotations
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. function:: annotate_halos(self, halo_catalog, col='white', alpha=1, \
-                             width=None):
+.. function:: annotate_halos(self, halo_catalog, circle_kwargs=None, width=None, \ 
+                             annotate_field=None, font_kwargs=None, factor=1.0):
 
    (This is a proxy for
    :class:`~yt.visualization.plot_modifications.HaloCatalogCallback`.)
 
    Accepts a :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` 
-   and plots a circle at the location of each
-   halo with the radius of the circle corresponding to the virial radius of the
-   halo.  If ``width`` is set to None (default) all halos are plotted.
-   Otherwise, only halos that fall within a slab with width ``width`` centered
-   on the center of the plot data. The color and transparency of the circles can
-   be controlled with ``col`` and ``alpha`` respectively.
+   and plots a circle at the location of each halo with the radius of the 
+   circle corresponding to the virial radius of the halo.  If ``width`` is set 
+   to None (default) all halos are plotted, otherwise it accepts a tuple in
+   the form ``(1.0, 'Mpc')`` to only display halos that fall within a slab
+   with width ``width`` centered on the center of the plot data.  The
+   appearance of the circles can be changed with the ``circle_kwargs``
+   dictionary, which is supplied to the Matplotlib patch Circle.  One can
+   label each of the halos with ``annotate_field``, which accepts a field
+   contained in the halo catalog to add text to the plot near the halo
+   (for example, ``annotate_field='particle_mass'`` will write the halo mass
+   next to each halo, whereas ``'particle_identifier'`` shows the halo
+   number).  ``font_kwargs`` contains the arguments controlling the text
+   appearance of the annotated field.  ``factor`` is the number the virial
+   radius is multiplied by for plotting the circles; for example,
+   ``factor=2.0`` will plot circles with twice the virial radius of each halo.
 
 .. python-script::
 
@@ -177,7 +186,7 @@
    hc.create()
 
    prj = yt.ProjectionPlot(data_ds, 'z', 'density')
-   prj.annotate_halos(hc)
+   prj.annotate_halos(hc, annotate_field='particle_identifier')
    prj.save()
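
   The hunk above exercises only ``annotate_field``; a hedged sketch of
   the fuller call described in the docstring, with hypothetical styling
   values passed through to the Circle patch and the label text:

      prj = yt.ProjectionPlot(data_ds, 'z', 'density')
      prj.annotate_halos(hc, width=(10, 'Mpc'),
                         annotate_field='particle_identifier',
                         circle_kwargs={'edgecolor': 'white', 'linewidth': 2},
                         font_kwargs={'color': 'white', 'size': 12},
                         factor=2.0)
      prj.save()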
 
 Overplot a Straight Line

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 doc/source/visualizing/colormaps/index.rst
--- a/doc/source/visualizing/colormaps/index.rst
+++ b/doc/source/visualizing/colormaps/index.rst
@@ -6,14 +6,20 @@
 There are several colormaps available for yt.  yt includes all of the 
 matplotlib colormaps as well for nearly all functions.  Individual visualization
 functions usually allow you to specify a colormap with the ``cmap`` flag.
-There are a small number of functions (mostly contained in the image_writer 
-module; e.g. write_bitmap, write_image, write_projection, etc.), which do 
-not load the matplotlib infrastructure and can only access the colormaps 
-native to yt.  
 
-Here is a chart of all of the colormaps available.  In addition to each 
-colormap displayed here, you can access its "reverse" by simply appending a 
-``"_r"`` to the end of the colormap name.
+If you have installed brewer2mpl (``pip install brewer2mpl`` or see
+`https://github.com/jiffyclub/brewer2mpl
+<https://github.com/jiffyclub/brewer2mpl>`_), you can also access the discrete
+colormaps available on `http://colorbrewer2.org <http://colorbrewer2.org>`_.
+Instead of supplying the colormap name, specify a tuple of the form (name, type,
+number), for example ``('RdBu', 'Diverging', 9)``.  These discrete colormaps will
+not be interpolated, and can be useful for creating
+colorblind/printer/grayscale-friendly plots. For more information, visit
+`http://colorbrewer2.org <http://colorbrewer2.org>`_.
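
A minimal sketch of using one of these discrete maps, assuming
brewer2mpl is installed and the IsolatedGalaxy sample dataset; the
(name, type, number) tuple goes anywhere a colormap name normally
would:

    import yt

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    slc = yt.SlicePlot(ds, 'z', 'density')

    # a discrete, diverging, 9-class map from colorbrewer2.org
    slc.set_cmap('density', ('RdBu', 'Diverging', 9))
    slc.save()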
+
+Here is a chart of all of the yt and matplotlib colormaps available.  In
+addition to each colormap displayed here, you can access its "reverse" by simply
+appending a ``"_r"`` to the end of the colormap name.
 
 All Colormaps (including matplotlib)
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -90,6 +90,7 @@
     kwargs = dict()
 
 ip.ex("from yt.mods import *")
+ip.ex("import yt")
 
 # Now we add some tab completers, in the vein of:
 # http://pymel.googlecode.com/svn/trunk/tools/ipymel.py

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 setup.py
--- a/setup.py
+++ b/setup.py
@@ -103,7 +103,7 @@
         options = Cython.Compiler.Main.CompilationOptions(
             defaults=Cython.Compiler.Main.default_options,
             include_path=extension.include_dirs,
-            language=extension.language, cplus=cplus,
+            cplus=cplus,
             output_file=target_file)
         cython_result = Cython.Compiler.Main.compile(source,
                                                      options=options)

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -195,7 +195,6 @@
     ## tau_0
     tau_X = np.sqrt(np.pi) * e**2 / (me * ccgs) * \
         column_density * fval / vdop
-    tau1 = tau_X * lam1cgs
     tau0 = tau_X * lam0cgs
 
     # dimensionless frequency offset in units of doppler freq

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -328,7 +328,7 @@
                                                         output["redshift"])
                 proper_box_size = self.simulation.box_size / \
                   (1.0 + output["redshift"])
-                pixel_xarea = (proper_box_size.in_cgs() / pixels)**2 #in proper cm^2
+                pixel_area = (proper_box_size.in_cgs() / pixels)**2 #in proper cm^2
                 factor = pixel_area / (4.0 * np.pi * dL.in_cgs()**2)
                 mylog.info("Distance to slice = %s" % dL)
                frb[field] *= factor #in erg/s/cm^2/Hz on observer's image plane.

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/analysis_modules/halo_analysis/fields.py
--- a/yt/analysis_modules/halo_analysis/fields.py
+++ b/yt/analysis_modules/halo_analysis/fields.py
@@ -30,7 +30,7 @@
         sl_right = slice(2, None, None)
         div_fac = 2.0
     else:
-        sl_left, sl_right, div_face = slice_info
+        sl_left, sl_right, div_fac = slice_info
 
     def _virial_radius(field, data):
         virial_radius = data.get_field_parameter("virial_radius")

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -80,7 +80,6 @@
     """
 
     dds = halo.halo_catalog.data_ds
-    hds = halo.halo_catalog.halos_ds
     center = dds.arr([halo.quantities["particle_position_%s" % axis] \
                       for axis in "xyz"])
     radius = factor * halo.quantities[radius_field]

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -364,7 +364,6 @@
         if self.halos_ds is None:
             # Find the halos and make a dataset of them
             self.halos_ds = self.finder_method(self.data_ds)
-            self.halos_ds.index
             if self.halos_ds is None:
                 mylog.warning('No halos were found for {0}'.format(\
                         self.data_ds.basename))
@@ -373,6 +372,7 @@
                     self.save_catalog()
                     self.halos_ds = None
                 return
+            self.halos_ds.index
 
             # Assign ds and data sources appropriately
             self.data_source = self.halos_ds.all_data()

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -788,7 +788,7 @@
     
         # Now compute the CDM+HDM+baryon transfer functions
         tf_cb = self.tf_master*self.growth_cb/self.growth_k0;
-        tf_cbnu = self.tf_master*self.growth_cbnu/self.growth_k0;
+        #tf_cbnu = self.tf_master*self.growth_cbnu/self.growth_k0;
         return tf_cb
 
 
@@ -832,7 +832,6 @@
     area1 = np.sum(areas)
     # Now we refine until the error is smaller than *error*.
     diff = area1 - area0
-    area_final = area1
     area_last = area1
     one_pow = 3
     while diff > error:

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -32,7 +32,6 @@
     contours = {}
     node_ids = []
     DLE = data_source.ds.domain_left_edge
-    total_vol = None
     selector = getattr(data_source, "base_object", data_source).selector
     masks = dict((g.id, m) for g, m in data_source.blocks)
     for (g, node, (sl, dims, gi)) in data_source.tiles.slice_traverse():

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -128,7 +128,6 @@
         energy = self.spectral_model.ebins
     
         cell_em = EM[idxs]*vol_scale
-        cell_vol = vol[idxs]*vol_scale
     
         number_of_photons = np.zeros(dshape, dtype='uint64')
         energies = []
@@ -139,7 +138,6 @@
 
         for i, ikT in enumerate(kT_idxs):
 
-            ncells = int(bcounts[i])
             ibegin = bcell[i]
             iend = ecell[i]
             kT = kT_bins[ikT] + 0.5*dkT

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -490,7 +490,6 @@
         z_hat = orient.unit_vectors[2]
 
         n_ph = self.photons["NumberOfPhotons"]
-        num_cells = len(n_ph)
         n_ph_tot = n_ph.sum()
         
         eff_area = None
@@ -667,7 +666,6 @@
         tblhdu = hdulist["MATRIX"]
         n_de = len(tblhdu.data["ENERG_LO"])
         mylog.info("Number of energy bins in RMF: %d" % (n_de))
-        de = tblhdu.data["ENERG_HI"] - tblhdu.data["ENERG_LO"]
         mylog.info("Energy limits: %g %g" % (min(tblhdu.data["ENERG_LO"]),
                                              max(tblhdu.data["ENERG_HI"])))
 
@@ -682,7 +680,6 @@
         phYY = events["ypix"][eidxs]
 
         detectedChannels = []
-        pindex = 0
 
         # run through all photon energies and find which bin they go in
         k = 0

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -128,7 +128,6 @@
     if fni.endswith('.fits'):
         fni = fni.replace('.fits','')
 
-    ndomains_finished = 0
     for (num_halos, domain, halos) in domains_list:
         dle,dre = domain
         print 'exporting: '
@@ -154,7 +153,6 @@
             fh.write("%6.6e \n"%(halo.Rvir*ds['kpc']))
         fh.close()
         export_to_sunrise(ds, fnf, star_particle_type, dle*1.0/dn, dre*1.0/dn)
-        ndomains_finished +=1
 
 def domains_from_halos(ds,halo_list,frvir=0.15):
     domains = {}
@@ -172,8 +170,6 @@
     domains_list = [(len(v),k,v) for k,v in domains.iteritems()]
     domains_list.sort() 
     domains_list.reverse() #we want the most populated domains first
-    domains_limits = [d[1] for d in domains_list]
-    domains_halos  = [d[2] for d in domains_list]
     return domains_list
 
 def prepare_octree(ds,ile,start_level=0,debug=True,dd=None,center=None):
@@ -245,10 +241,6 @@
     hs       = hilbert_state()
     start_time = time.time()
     if debug:
-        if center is not None: 
-            c = center*ds['kpc']
-        else:
-            c = ile*1.0/ds.domain_dimensions*ds['kpc']
         printing = lambda x: print_oct(x)
     else:
         printing = None
@@ -332,7 +324,7 @@
         #then translate onto the subgrid integer index 
         parent_fle  = grid.left_edges + cell_index*grid.dx
         subgrid_ile = np.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
-        for i, (vertex,hilbert_child) in enumerate(hilbert):
+        for (vertex, hilbert_child) in hilbert:
             #vertex is a combination of three 0s and 1s to 
             #denote each of the 8 octs
             if level < 0:

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -89,8 +89,6 @@
     L = 2 * R * cm_per_kpc
     bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) * L
 
-    dl = L/nz
-
     ds = load_uniform_grid(data, ddims, length_unit='cm', bbox=bbox)
     ds.index
 

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -418,7 +418,6 @@
         otherwise Glue will be started.
         """
         from glue.core import DataCollection, Data
-        from glue.core.coordinates import coordinates_from_header
         from glue.qt.glue_application import GlueApplication
         
         gdata = Data(label=label)
@@ -494,6 +493,18 @@
                     ftype = self._current_fluid_type
                     if (ftype, fname) not in self.ds.field_info:
                         ftype = self.ds._last_freq[0]
+
+                # really ugly check to ensure that this field really does exist somewhere,
+                # in some naming convention, before returning it as a possible field type
+                if (ftype,fname) not in self.ds.field_list and \
+                        fname not in self.ds.field_list and \
+                        (ftype,fname) not in self.ds.derived_field_list and \
+                        fname not in self.ds.derived_field_list and \
+                        (ftype,fname) not in self._container_fields:
+                    raise YTFieldNotFound((ftype,fname),self.ds)
+
+            # these tests are really insufficient as a field type may be valid, and the
+            # field name may be valid, but not the combination (field type, field name)
             if finfo.particle_type and ftype not in self.ds.particle_types:
                 raise YTFieldTypeNotFound(ftype)
             elif not finfo.particle_type and ftype not in self.ds.fluid_types:
@@ -621,7 +632,7 @@
                 fields_to_generate.append(field)
                 continue
             fields_to_get.append(field)
-        if len(fields_to_get) == 0 and fields_to_generate == 0:
+        if len(fields_to_get) == 0 and len(fields_to_generate) == 0:
             return
         elif self._locked == True:
             raise GenerationInProgress(fields)
@@ -787,13 +798,16 @@
     def _get_pw(self, fields, center, width, origin, plot_type):
         from yt.visualization.plot_window import \
             get_window_parameters, PWViewerMPL
-        from yt.visualization.fixed_resolution import FixedResolutionBuffer as frb
+        from yt.visualization.fixed_resolution import \
+            FixedResolutionBuffer as frb
         axis = self.axis
         skip = self._key_fields
         skip += list(set(frb._exclude_fields).difference(set(self._key_fields)))
-        self.fields = ensure_list(fields) + \
-            [k for k in self.field_data if k not in skip]
-        (bounds, center) = get_window_parameters(axis, center, width, self.ds)
+        self.fields = [k for k in self.field_data if k not in skip]
+        if fields is not None:
+            self.fields = ensure_list(fields) + self.fields
+        (bounds, center, display_center) = \
+            get_window_parameters(axis, center, width, self.ds)
         pw = PWViewerMPL(self, bounds, fields=self.fields, origin=origin,
                          frb_generator=frb, plot_type=plot_type)
         pw._setup_plots()
@@ -1177,16 +1191,15 @@
         return self._particle_handler
 
 
-    def volume(self, unit = "unitary"):
+    def volume(self):
         """
-        Return the volume of the data container in units *unit*.
+        Return the volume of the data container.
         This is found by adding up the volume of the cells with centers
         in the container, rather than using the geometric shape of
         the container, so this may vary very slightly
         from what might be expected from the geometric volume.
         """
-        return self.quantities["TotalQuantity"]("CellVolume")[0] * \
-            (self.ds[unit] / self.ds['cm']) ** 3.0
+        return self.quantities.total_quantity(("index", "cell_volume"))
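
The reworked volume() now returns a unitful quantity instead of taking
a unit argument; a minimal sketch of the corresponding call pattern,
assuming a sphere on the IsolatedGalaxy sample dataset:

    import yt

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    sp = ds.sphere('c', (10, 'kpc'))

    # convert the returned YTQuantity rather than passing a unit
    vol = sp.volume()
    print vol.in_units('kpc**3')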
 
 # Many of these items are set up specifically to ensure that
 # we are not breaking old pickle files.  This means we must only call the

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -177,7 +177,7 @@
                 self.ds.domain_left_edge,
                 self.ds.domain_right_edge,
                 over_refine = self._oref)
-            particle_octree.n_ref = nneighbors / 2
+            particle_octree.n_ref = nneighbors
             particle_octree.add(morton)
             particle_octree.finalize()
             pdom_ind = particle_octree.domain_ind(self.selector)

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -45,17 +45,12 @@
     YTArray, \
     YTQuantity
 
-from yt.geometry.cartesian_coordinates import \
-    CartesianCoordinateHandler
-from yt.geometry.polar_coordinates import \
-    PolarCoordinateHandler
-from yt.geometry.cylindrical_coordinates import \
-    CylindricalCoordinateHandler
-from yt.geometry.spherical_coordinates import \
-    SphericalCoordinateHandler
-from yt.geometry.geographic_coordinates import \
-    GeographicCoordinateHandler
-from yt.geometry.spec_cube_coordinates import \
+from yt.geometry.coordinates.api import \
+    CartesianCoordinateHandler, \
+    PolarCoordinateHandler, \
+    CylindricalCoordinateHandler, \
+    SphericalCoordinateHandler, \
+    GeographicCoordinateHandler, \
     SpectralCubeCoordinateHandler
 
 # We want to support the movie format in the future.
@@ -460,8 +455,6 @@
             self._last_freq = field
             self._last_finfo = self.field_info[(ftype, fname)]
             return self._last_finfo
-        if fname == self._last_freq[1]:
-            return self._last_finfo
         if fname in self.field_info:
             # Sometimes, if guessing_type == True, this will be switched for
             # the type of field it is.  So we look at the field type and

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -54,12 +54,13 @@
                 yield assert_equal, np.unique(proj["py"]), uc[yax]
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
-                pw = proj.to_pw(fields='density')
-                for p in pw.plots.values():
-                    tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
-                    os.close(tmpfd)
-                    p.save(name=tmpname)
-                    fns.append(tmpname)
+                plots = [proj.to_pw(fields='density'), proj.to_pw()]
+                for pw in plots:
+                    for p in pw.plots.values():
+                        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+                        os.close(tmpfd)
+                        p.save(name=tmpname)
+                        fns.append(tmpname)
                 frb = proj.to_frb((1.0, 'unitary'), 64)
                 for proj_field in ['ones', 'density']:
                     fi = ds._get_field_info(proj_field)

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/data_objects/tests/test_spheres.py
--- a/yt/data_objects/tests/test_spheres.py
+++ b/yt/data_objects/tests/test_spheres.py
@@ -6,10 +6,11 @@
     from yt.config import ytcfg
     ytcfg["yt","__withintesting"] = "True"
 
+_fields_to_compare = ("spherical_r", "cylindrical_r",
+                      "spherical_theta", "cylindrical_theta",
+                      "spherical_phi", "cylindrical_z")
+
 def test_domain_sphere():
-    ds = fake_random_ds(16, fields = ("density"))
-    sp = ds.sphere(ds.domain_center, ds.domain_width[0])
-
     # Now we test that we can get different radial velocities based on field
     # parameters.
 
@@ -51,3 +52,12 @@
     yield assert_equal, np.any(rp0["radial_velocity"][rp0.used] ==
                                rp1["radial_velocity"][rp1.used]), \
                                False
+
+    ref_sp = ds.sphere("c", 0.25)
+    for f in _fields_to_compare:
+        ref_sp[f].sort()
+    for center in periodicity_cases(ds):
+        sp = ds.sphere(center, 0.25)
+        for f in _fields_to_compare:
+            sp[f].sort()
+            yield assert_equal, sp[f], ref_sp[f]

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -322,10 +322,6 @@
     create_magnitude_field(registry, "particle_specific_angular_momentum",
                            "cm**2/s", ftype=ptype, particle_type=True)
     
-    def _particle_angular_momentum(field, data):
-        return data[ptype, "particle_mass"] \
-             * data[ptype, "particle_specific_angular_momentum"]
-
     def _particle_angular_momentum_x(field, data):
         return data[ptype, "particle_mass"] * \
                data[ptype, "particle_specific_angular_momentum_x"]
@@ -350,6 +346,15 @@
              units="g*cm**2/s", particle_type=True,
              validators=[ValidateParameter('center')])
 
+    def _particle_angular_momentum(field, data):
+        return data[ptype, "particle_mass"] \
+            * data[ptype, "particle_specific_angular_momentum"]
+    registry.add_field((ptype, "particle_angular_momentum"),
+              function=_particle_angular_momentum,
+              particle_type=True,
+              units="g*cm**2/s",
+              validators=[ValidateParameter("center")])
+
     create_magnitude_field(registry, "particle_angular_momentum",
                            "g*cm**2/s", ftype=ptype, particle_type=True)
     
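Note the functional change in this hunk: _particle_angular_momentum
was previously defined but, as far as the removed lines show, never
registered, so the (ptype, "particle_angular_momentum") field did not
actually exist. The change moves the definition below the
per-component registrations and adds the registry.add_field call. The
field itself is just mass times specific angular momentum; in plain
NumPy terms (a sketch, not the registry code):

    import numpy as np
    m = np.array([2.0, 3.0])             # particle_mass, in g
    j = np.array([[1.0, 0.0, 0.0],
                  [0.0, 2.0, 0.0]])      # specific ang. mom., cm**2/s
    L = m[:, None] * j                   # particle_angular_momentum
    L_mag = np.sqrt((L**2).sum(axis=-1)) # the derived magnitude field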

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -43,18 +43,14 @@
                            ftype = "gas", slice_info = None,
                            validators = None, particle_type=False):
 
-    xn, yn, zn = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
-
-    # Is this safe?
-    if registry.ds.dimensionality < 3:
-        zn = ("index", "zeros")
-    if registry.ds.dimensionality < 2:
-        yn = ("index", "zeros")
+    field_components = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
 
     def _magnitude(field, data):
-        mag  = data[xn] * data[xn]
-        mag += data[yn] * data[yn]
-        mag += data[zn] * data[zn]
+        fn = field_components[0]
+        mag = data[fn] * data[fn]
+        for idim in range(1, registry.ds.dimensionality):
+            fn = field_components[idim]
+            mag += data[fn] * data[fn]
         return np.sqrt(mag)
 
     registry.add_field((ftype, "%s_magnitude" % basename),
@@ -65,18 +61,14 @@
                          ftype = "gas", slice_info = None,
                          validators = None, particle_type=False):
 
-    xn, yn, zn = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
-
-    # Is this safe?
-    if registry.ds.dimensionality < 3:
-        zn = ("index", "zeros")
-    if registry.ds.dimensionality < 2:
-        yn = ("index", "zeros")
+    field_components = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
 
     def _squared(field, data):
-        squared  = data[xn] * data[xn]
-        squared += data[yn] * data[yn]
-        squared += data[zn] * data[zn]
+        fn = field_components[0]
+        squared  = data[fn] * data[fn]
+        for idim in range(1, registry.ds.dimensionality):
+            fn = field_components[idim]
+            squared += data[fn] * data[fn]
         return squared
 
     registry.add_field((ftype, "%s_squared" % basename),
@@ -131,7 +123,7 @@
     registry.add_field((ftype, "radial_%s" % basename),
                        function = _radial, units = field_units)
     registry.add_field((ftype, "radial_%s_absolute" % basename),
-                       function = _radial, units = field_units)
+                       function = _radial_absolute, units = field_units)
     registry.add_field((ftype, "tangential_%s" % basename),
                        function=_tangential, units = field_units)
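
Two things change in this file: the magnitude and squared helpers now
loop over only the dimensions the dataset actually has, instead of
substituting ("index", "zeros") for the missing components, and a real
bug is fixed in which "radial_%s_absolute" had been registered with
_radial rather than _radial_absolute. The looping pattern, distilled
into a standalone sketch:

    import numpy as np

    def magnitude(components, dimensionality):
        # components: one array per axis; only the first
        # `dimensionality` entries are physically meaningful.
        mag = components[0] * components[0]
        for idim in range(1, dimensionality):
            mag += components[idim] * components[idim]
        return np.sqrt(mag)

    # 2D example: the z component never enters the sum.
    vx, vy, vz = np.array([3.0]), np.array([4.0]), np.array([99.0])
    print(magnitude([vx, vy, vz], 2))  # -> [ 5.]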
 

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/frontends/art/tests/test_outputs.py
--- a/yt/frontends/art/tests/test_outputs.py
+++ b/yt/frontends/art/tests/test_outputs.py
@@ -14,11 +14,13 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
+from yt.testing import \
+    requires_file, \
+    assert_equal
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
-    small_patch_amr, \
     big_patch_amr, \
+    PixelizedProjectionValuesTest, \
     data_dir_load
 from yt.frontends.art.api import ARTDataset
 
@@ -41,3 +43,8 @@
                     yield PixelizedProjectionValuesTest(
                         d9p, axis, field, weight_field,
                         dobj_name)
+
+
+@requires_file(d9p)
+def test_ARTDataset():
+    assert isinstance(data_dir_load(d9p), ARTDataset)

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -1,5 +1,5 @@
 """
-ARTIO frontend tests 
+ARTIO frontend tests
 
 
 
@@ -24,7 +24,7 @@
 from yt.frontends.artio.api import ARTIODataset
 
 _fields = ("temperature", "density", "velocity_magnitude",
-           ("deposit", "all_density"), ("deposit", "all_count")) 
+           ("deposit", "all_density"), ("deposit", "all_count"))
 
 sizmbhloz = "sizmbhloz-clref04SNth-rs9_a0.9011/sizmbhloz-clref04SNth-rs9_a0.9011.art"
 @requires_ds(sizmbhloz)
@@ -45,3 +45,8 @@
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
+
+
+@requires_file(sizmbhloz)
+def test_ARTIODataset():
+    assert isinstance(data_dir_load(sizmbhloz), ARTIODataset)

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -57,3 +57,8 @@
     for test in small_patch_amr(stripping, _fields_stripping):
         test_stripping.__name__ = test.description
         yield test
+
+
+@requires_file(cloud)
+def test_AthenaDataset():
+    assert isinstance(data_dir_load(cloud), AthenaDataset)
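
The same smoke test is added to several frontends in this changeset;
the pattern, with FILE and SomeDataset standing in for each frontend's
sample file and dataset class, is simply:

    @requires_file(FILE)  # skipped cleanly when FILE is absent
    def test_SomeDataset():
        assert isinstance(data_dir_load(FILE), SomeDataset)

This guards the _is_valid dispatch logic: if one frontend starts
claiming another frontend's outputs, the isinstance check fails.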

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -1,5 +1,5 @@
 """
-Data structures for Boxlib Codes 
+Data structures for BoxLib Codes
 
 
 
@@ -15,10 +15,8 @@
 
 import os
 import re
-import weakref
 import itertools
 
-from collections import defaultdict
 from stat import ST_CTIME
 
 import numpy as np
@@ -27,53 +25,46 @@
 from yt.data_objects.grid_patch import AMRGridPatch
 from yt.geometry.grid_geometry_handler import GridIndex
 from yt.data_objects.static_output import Dataset
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
+
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
 from yt.utilities.lib.misc_utilities import \
     get_box_grids_level
-from yt.geometry.selection_routines import \
-    RegionSelector
 from yt.utilities.io_handler import \
     io_registry
-from yt.utilities.physical_constants import \
-    cm_per_mpc
 
 from .fields import \
     BoxlibFieldInfo, \
     MaestroFieldInfo, \
     CastroFieldInfo
 
-from .io import IOHandlerBoxlib
 # This is what we use to find scientific notation that might include d's
 # instead of e's.
 _scinot_finder = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
 # This is the dimensions in the Cell_H file for each level
 # It is different for different dimensionalities, so we make a list
-_dim_finder = [ \
+_dim_finder = [
     re.compile(r"\(\((\d+)\) \((\d+)\) \(\d+\)\)$"),
     re.compile(r"\(\((\d+,\d+)\) \((\d+,\d+)\) \(\d+,\d+\)\)$"),
     re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")]
 # This is the line that prefixes each set of data for a FAB in the FAB file
 # It is different for different dimensionalities, so we make a list
 _endian_regex = r"^FAB \(\(\d+, \([0-9 ]+\)\),\((\d+), \(([0-9 ]+)\)\)\)"
-_header_pattern = [ \
-    re.compile(_endian_regex + 
+_header_pattern = [
+    re.compile(_endian_regex +
                r"\(\((\d+)\) \((\d+)\) \((\d+)\)\) (\d+)\n"),
-    re.compile(_endian_regex + 
+    re.compile(_endian_regex +
                r"\(\((\d+,\d+)\) \((\d+,\d+)\) \((\d+,\d+)\)\) (\d+)\n"),
-    re.compile(_endian_regex + 
+    re.compile(_endian_regex +
                r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")]
 
 
-
 class BoxlibGrid(AMRGridPatch):
     _id_offset = 0
     _offset = -1
 
-    def __init__(self, grid_id, offset, filename = None,
-                 index = None):
+    def __init__(self, grid_id, offset, filename=None,
+                 index=None):
         super(BoxlibGrid, self).__init__(grid_id, filename, index)
         self._base_offset = offset
         self._parent_id = []
@@ -126,7 +117,7 @@
         return coords
 
     # Override this as well, since refine_by can vary
-    def _fill_child_mask(self, child, mask, tofill, dlevel = 1):
+    def _fill_child_mask(self, child, mask, tofill, dlevel=1):
         rf = self.ds.ref_factors[self.Level]
         if dlevel != 1:
             raise NotImplementedError
@@ -139,8 +130,10 @@
              startIndex[1]:endIndex[1],
              startIndex[2]:endIndex[2]] = tofill
 
+
 class BoxlibHierarchy(GridIndex):
     grid = BoxlibGrid
+
     def __init__(self, ds, dataset_type='boxlib_native'):
         self.dataset_type = dataset_type
         self.header_filename = os.path.join(ds.output_dir, 'Header')
@@ -149,19 +142,17 @@
         GridIndex.__init__(self, ds, dataset_type)
         self._cache_endianness(self.grids[-1])
 
-        #self._read_particles()
-
     def _parse_index(self):
         """
         read the global header file for a Boxlib plotfile output.
         """
         self.max_level = self.dataset._max_level
-        header_file = open(self.header_filename,'r')
+        header_file = open(self.header_filename, 'r')
 
         self.dimensionality = self.dataset.dimensionality
         _our_dim_finder = _dim_finder[self.dimensionality-1]
-        DRE = self.dataset.domain_right_edge # shortcut
-        DLE = self.dataset.domain_left_edge # shortcut
+        DRE = self.dataset.domain_right_edge  # shortcut
+        DLE = self.dataset.domain_left_edge   # shortcut
 
         # We can now skip to the point in the file we want to start parsing.
         header_file.seek(self.dataset._header_mesh_start)
@@ -190,13 +181,13 @@
         if int(header_file.next()) != 0:
             raise RuntimeError("INTERNAL ERROR! This should be a zero.")
 
-        # each level is one group with ngrids on it. 
-        # each grid has self.dimensionality number of lines of 2 reals 
+        # each level is one group with ngrids on it.
+        # each grid has self.dimensionality number of lines of 2 reals
         self.grids = []
         grid_counter = 0
         for level in range(self.max_level + 1):
             vals = header_file.next().split()
-            lev, ngrids, cur_time = int(vals[0]),int(vals[1]),float(vals[2])
+            lev, ngrids = int(vals[0]), int(vals[1])
             assert(lev == level)
             nsteps = int(header_file.next())
             for gi in range(ngrids):
@@ -232,10 +223,10 @@
             for gi in range(ngrids):
                 # components within it
                 start, stop = _our_dim_finder.match(level_header_file.next()).groups()
-                # fix for non-3d data 
+                # fix for non-3d data
                 # note we append '0' to both ends b/c of the '+1' in dims below
                 start += ',0'*(3-self.dimensionality)
-                stop  += ',0'*(3-self.dimensionality)
+                stop += ',0'*(3-self.dimensionality)
                 start = np.array(start.split(","), dtype="int64")
                 stop = np.array(stop.split(","), dtype="int64")
                 dims = stop - start + 1
@@ -259,7 +250,7 @@
             # already read the filenames above...
         self.float_type = 'float64'
 
-    def _cache_endianness(self,test_grid):
+    def _cache_endianness(self, test_grid):
         """
         Cache the endianness and bytes per real of the grids by using a
         test grid and assuming that all grids have the same
@@ -270,7 +261,7 @@
         # open the test file & grab the header
         with open(os.path.expanduser(test_grid.filename), 'rb') as f:
             header = f.readline()
-        
+
         bpr, endian, start, stop, centering, nc = \
             _header_pattern[self.dimensionality-1].search(header).groups()
         # Note that previously we were using a different value for BPR than we
@@ -294,7 +285,8 @@
         self.grids = np.array(self.grids, dtype='object')
         self._reconstruct_parent_child()
         for i, grid in enumerate(self.grids):
-            if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
+            if (i % 1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i,
+                                           self.num_grids)
             grid._prepare_grid()
             grid._setup_dx()
         mylog.debug("Done creating grid objects")
@@ -308,10 +300,10 @@
                                 self.grid_levels[i] + 1,
                                 self.grid_left_edge, self.grid_right_edge,
                                 self.grid_levels, mask)
-            ids = np.where(mask.astype("bool")) # where is a tuple
-            grid._children_ids = ids[0] + grid._id_offset 
+            ids = np.where(mask.astype("bool"))  # where is a tuple
+            grid._children_ids = ids[0] + grid._id_offset
         mylog.debug("Second pass; identifying parents")
-        for i, grid in enumerate(self.grids): # Second pass
+        for i, grid in enumerate(self.grids):  # Second pass
             for child in grid.Children:
                 child._parent_id.append(i + grid._id_offset)
 
@@ -331,10 +323,10 @@
         for line in header_file:
             if len(line.split()) != 3: continue
             self.num_grids += int(line.split()[1])
-        
+
     def _initialize_grid_arrays(self):
         super(BoxlibHierarchy, self)._initialize_grid_arrays()
-        self.grid_start_index = np.zeros((self.num_grids,3), 'int64')
+        self.grid_start_index = np.zeros((self.num_grids, 3), 'int64')
 
     def _initialize_state_variables(self):
         """override to not re-initialize num_grids in AMRHierarchy.__init__
@@ -349,7 +341,7 @@
         self.field_list = [("boxlib", f) for f in
                            self.dataset._field_list]
         self.field_indexes = dict((f[1], i)
-                                for i, f in enumerate(self.field_list))
+                                  for i, f in enumerate(self.field_list))
         # There are times when field_list may change.  We copy it here to
         # avoid that possibility.
         self.field_order = [f for f in self.field_list]
@@ -357,6 +349,7 @@
     def _setup_data_io(self):
         self.io = io_registry[self.dataset_type](self.dataset)
 
+
 class BoxlibDataset(Dataset):
     """
     This class is a stripped down class that simply reads and parses
@@ -370,10 +363,10 @@
     periodicity = (True, True, True)
 
     def __init__(self, output_dir,
-                 cparam_filename = "inputs",
-                 fparam_filename = "probin",
+                 cparam_filename="inputs",
+                 fparam_filename="probin",
                  dataset_type='boxlib_native',
-                 storage_filename = None):
+                 storage_filename=None):
         """
         The paramfile is usually called "inputs"
         and there may be a fortran inputs file usually called "probin"
@@ -390,14 +383,13 @@
         Dataset.__init__(self, output_dir, dataset_type)
 
         # These are still used in a few places.
-        if not "HydroMethod" in self.parameters.keys():
+        if "HydroMethod" not in self.parameters.keys():
             self.parameters["HydroMethod"] = 'boxlib'
-        self.parameters["Time"] = 1. # default unit is 1...
-        self.parameters["EOSType"] = -1 # default
+        self.parameters["Time"] = 1.     # default unit is 1...
+        self.parameters["EOSType"] = -1  # default
         self.parameters["gamma"] = self.parameters.get(
             "materials.gamma", 1.6667)
 
-
     def _localize_check(self, fn):
         # If the file exists, use it.  If not, set it to None.
         root_dir = os.path.dirname(self.output_dir)
@@ -410,6 +402,8 @@
     def _is_valid(cls, *args, **kwargs):
         # fill our args
         output_dir = args[0]
+        # boxlib datasets are always directories
+        if not os.path.isdir(output_dir): return False
         header_filename = os.path.join(output_dir, "Header")
         jobinfo_filename = os.path.join(output_dir, "job_info")
         if not os.path.exists(header_filename):
@@ -418,11 +412,11 @@
         args = inspect.getcallargs(cls.__init__, args, kwargs)
         # This might need to be localized somehow
         inputs_filename = os.path.join(
-                            os.path.dirname(os.path.abspath(output_dir)),
-                            args['cparam_filename'])
+            os.path.dirname(os.path.abspath(output_dir)),
+            args['cparam_filename'])
         if not os.path.exists(inputs_filename) and \
            not os.path.exists(jobinfo_filename):
-            return True # We have no parameters to go off of
+            return True  # We have no parameters to go off of
         # If we do have either inputs or jobinfo, we should be deferring to a
         # different frontend.
         return False
@@ -464,7 +458,7 @@
             self.omega_lambda = self.parameters["comoving_OmL"]
             self.omega_matter = self.parameters["comoving_OmM"]
             self.hubble_constant = self.parameters["comoving_h"]
-            a_file = open(os.path.join(self.output_dir,'comoving_a'))
+            a_file = open(os.path.join(self.output_dir, 'comoving_a'))
             line = a_file.readline().strip()
             a_file.close()
             self.current_redshift = 1/float(line) - 1
@@ -491,7 +485,7 @@
             # So we'll try to determine this.
             vals = vals.split()
             if any(_scinot_finder.match(v) for v in vals):
-                vals = [float(v.replace("D","e").replace("d","e"))
+                vals = [float(v.replace("D", "e").replace("d", "e"))
                         for v in vals]
             if len(vals) == 1:
                 vals = vals[0]
@@ -509,22 +503,22 @@
         # call readline() if we want to end up with an offset at the very end.
         # Fortunately, elsewhere we don't care about the offset, so we're fine
         # everywhere else using iteration exclusively.
-        header_file = open(os.path.join(self.output_dir,'Header'))
+        header_file = open(os.path.join(self.output_dir, 'Header'))
         self.orion_version = header_file.readline().rstrip()
         n_fields = int(header_file.readline())
 
         self._field_list = [header_file.readline().strip()
-                           for i in range(n_fields)]
+                            for i in range(n_fields)]
 
         self.dimensionality = int(header_file.readline())
         self.current_time = float(header_file.readline())
         # This is traditionally a index attribute, so we will set it, but
         # in a slightly hidden variable.
-        self._max_level = int(header_file.readline()) 
+        self._max_level = int(header_file.readline())
         self.domain_left_edge = np.array(header_file.readline().split(),
                                          dtype="float64")
         self.domain_right_edge = np.array(header_file.readline().split(),
-                                         dtype="float64")
+                                          dtype="float64")
         ref_factors = np.array([int(i) for i in
                                 header_file.readline().split()])
         if ref_factors.size == 0:
@@ -540,26 +534,26 @@
             self.refine_by = min(ref_factors)
             # Check that they're all multiples of the minimum.
             if not all(float(rf)/self.refine_by ==
-                   int(float(rf)/self.refine_by) for rf in ref_factors):
+                       int(float(rf)/self.refine_by) for rf in ref_factors):
                 raise RuntimeError
             base_log = np.log2(self.refine_by)
-            self.level_offsets = [0] # level 0 has to have 0 offset
+            self.level_offsets = [0]  # level 0 has to have 0 offset
             lo = 0
             for lm1, rf in enumerate(self.ref_factors):
                 lo += int(np.log2(rf) / base_log) - 1
                 self.level_offsets.append(lo)
-        #assert(np.unique(ref_factors).size == 1)
+        # assert(np.unique(ref_factors).size == 1)
         else:
             self.refine_by = ref_factors[0]
             self.level_offsets = [0 for l in range(self._max_level + 1)]
-        # Now we read the global index space, to get 
+        # Now we read the global index space, to get
         index_space = header_file.readline()
         # This will be of the form:
         #  ((0,0,0) (255,255,255) (0,0,0)) ((0,0,0) (511,511,511) (0,0,0))
         # So note that if we split it all up based on spaces, we should be
         # fine, as long as we take the first two entries, which correspond to
         # the root level.  I'm not 100% pleased with this solution.
-        root_space = index_space.replace("(","").replace(")","").split()[:2]
+        root_space = index_space.replace("(", "").replace(")", "").split()[:2]
         start = np.array(root_space[0].split(","), dtype="int64")
         stop = np.array(root_space[1].split(","), dtype="int64")
         self.domain_dimensions = stop - start + 1
@@ -582,9 +576,9 @@
             raise RuntimeError("yt does not yet support spherical geometry")
 
         # overrides for 1/2-dimensional data
-        if self.dimensionality == 1: 
+        if self.dimensionality == 1:
             self._setup1d()
-        elif self.dimensionality == 2: 
+        elif self.dimensionality == 2:
             self._setup2d()
 
     def _set_code_unit_attributes(self):
@@ -594,20 +588,20 @@
         self.velocity_unit = self.quan(1.0, "cm/s")
 
     def _setup1d(self):
-#        self._index_class = BoxlibHierarchy1D
-#        self._fieldinfo_fallback = Orion1DFieldInfo
+        # self._index_class = BoxlibHierarchy1D
+        # self._fieldinfo_fallback = Orion1DFieldInfo
         self.domain_left_edge = \
             np.concatenate([self.domain_left_edge, [0.0, 0.0]])
         self.domain_right_edge = \
             np.concatenate([self.domain_right_edge, [1.0, 1.0]])
         tmp = self.domain_dimensions.tolist()
-        tmp.extend((1,1))
+        tmp.extend((1, 1))
         self.domain_dimensions = np.array(tmp)
         tmp = list(self.periodicity)
         tmp[1] = False
         tmp[2] = False
         self.periodicity = ensure_tuple(tmp)
-        
+
     def _setup2d(self):
         self.domain_left_edge = \
             np.concatenate([self.domain_left_edge, [0.0]])
@@ -636,12 +630,13 @@
         offset = self.level_offsets[l1] - self.level_offsets[l0]
         return self.refine_by**(l1-l0 + offset)
 
+
 class OrionHierarchy(BoxlibHierarchy):
-    
+
     def __init__(self, ds, dataset_type='orion_native'):
         BoxlibHierarchy.__init__(self, ds, dataset_type)
         self._read_particles()
-        #self.io = IOHandlerOrion
+        # self.io = IOHandlerOrion
 
     def _read_particles(self):
         """
@@ -673,7 +668,7 @@
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
                 # copied from object_finding_mixin.py
-                mask=np.ones(self.num_grids)
+                mask = np.ones(self.num_grids)
                 for i in xrange(len(coord)):
                     np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
                     np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
@@ -688,39 +683,42 @@
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
         return True
-                
+
+
 class OrionDataset(BoxlibDataset):
 
     _index_class = OrionHierarchy
 
     def __init__(self, output_dir,
-                 cparam_filename = "inputs",
-                 fparam_filename = "probin",
+                 cparam_filename="inputs",
+                 fparam_filename="probin",
                  dataset_type='orion_native',
-                 storage_filename = None):
+                 storage_filename=None):
 
         BoxlibDataset.__init__(self, output_dir,
-                 cparam_filename, fparam_filename, dataset_type)
-          
+                               cparam_filename, fparam_filename, dataset_type)
+
     @classmethod
     def _is_valid(cls, *args, **kwargs):
-        # fill our args                                                                               
+        # fill our args
         output_dir = args[0]
+        # boxlib datasets are always directories
+        if not os.path.isdir(output_dir): return False
         header_filename = os.path.join(output_dir, "Header")
         jobinfo_filename = os.path.join(output_dir, "job_info")
         if not os.path.exists(header_filename):
-            # We *know* it's not boxlib if Header doesn't exist.                                      
+            # We *know* it's not boxlib if Header doesn't exist.
             return False
         args = inspect.getcallargs(cls.__init__, args, kwargs)
-        # This might need to be localized somehow                                                     
+        # This might need to be localized somehow
         inputs_filename = os.path.join(
-                            os.path.dirname(os.path.abspath(output_dir)),
-                            args['cparam_filename'])
+            os.path.dirname(os.path.abspath(output_dir)),
+            args['cparam_filename'])
         if not os.path.exists(inputs_filename):
             return False
         if os.path.exists(jobinfo_filename):
             return False
-        # Now we check for all the others                                                             
+        # Now we check for all the others
         lines = open(inputs_filename).readlines()
         if any(("castro." in line for line in lines)): return False
         if any(("nyx." in line for line in lines)): return False
@@ -728,6 +726,7 @@
         if any(("geometry.prob_lo" in line for line in lines)): return True
         return False
 
+
 class CastroDataset(BoxlibDataset):
 
     _field_info_class = CastroFieldInfo
@@ -736,6 +735,8 @@
     def _is_valid(cls, *args, **kwargs):
         # fill our args
         output_dir = args[0]
+        # boxlib datasets are always directories
+        if not os.path.isdir(output_dir): return False
         header_filename = os.path.join(output_dir, "Header")
         jobinfo_filename = os.path.join(output_dir, "job_info")
         if not os.path.exists(header_filename):
@@ -748,6 +749,7 @@
         if any(line.startswith("Castro   ") for line in lines): return True
         return False
 
+
 class MaestroDataset(BoxlibDataset):
 
     _field_info_class = MaestroFieldInfo
@@ -756,6 +758,8 @@
     def _is_valid(cls, *args, **kwargs):
         # fill our args
         output_dir = args[0]
+        # boxlib datasets are always directories
+        if not os.path.isdir(output_dir): return False
         header_filename = os.path.join(output_dir, "Header")
         jobinfo_filename = os.path.join(output_dir, "job_info")
         if not os.path.exists(header_filename):
@@ -765,7 +769,7 @@
             return False
         # Now we check the job_info for the mention of maestro
         lines = open(jobinfo_filename).readlines()
-        if any("maestro" in line.lower() for line in lines): return True
+        if any(line.startswith("MAESTRO   ") for line in lines): return True
         return False
 
     def _parse_parameter_file(self):
@@ -782,7 +786,7 @@
                 line = f.next()
             # get the runtime parameters
             for line in f:
-                p, v = (_.strip() for _ in line[4:].split("=",1))
+                p, v = (_.strip() for _ in line[4:].split("=", 1))
                 if len(v) == 0:
                     self.parameters[p] = ""
                 else:
@@ -827,7 +831,7 @@
         maxlevel = int(header.readline()) # max level
 
         # Skip over how many grids on each level; this is degenerate
-        for i in range(maxlevel + 1):dummy = header.readline()
+        for i in range(maxlevel + 1): dummy = header.readline()
 
         grid_info = np.fromiter((int(i) for line in header.readlines()
                                  for i in line.split()),
@@ -844,6 +848,7 @@
 
         self.grid_particle_count[:, 0] = grid_info[:, 1]
 
+
 class NyxDataset(BoxlibDataset):
 
     _index_class = NyxHierarchy
@@ -852,6 +857,8 @@
     def _is_valid(cls, *args, **kwargs):
         # fill our args
         pname = args[0].rstrip("/")
+        # boxlib datasets are always directories
+        if not os.path.isdir(pname): return False
         dn = os.path.dirname(pname)
         if len(args) > 1:
             kwargs['paramFilename'] = args[1]
@@ -862,15 +869,13 @@
         # We check for the job_info file's existence because this is currently
         # what distinguishes Nyx data from MAESTRO data.
         pfn = os.path.join(pfname)
-        if not os.path.exists(pfn): return False
+        if not os.path.exists(pfn) or os.path.isdir(pfn): return False
         nyx = any(("nyx." in line for line in open(pfn)))
-        maestro = os.path.exists(os.path.join(pname, "job_info"))
-        orion = (not nyx) and (not maestro)
         return nyx
 
     def _parse_parameter_file(self):
         super(NyxDataset, self)._parse_parameter_file()
-        #return
+        # return
         # Nyx is always cosmological.
         self.cosmological_simulation = 1
         self.omega_lambda = self.parameters["comoving_OmL"]
@@ -904,7 +909,7 @@
     v = vals.split()[0] # Just in case there are multiple; we'll go
                         # back afterward to using vals.
     try:
-        float(v.upper().replace("D","E"))
+        float(v.upper().replace("D", "E"))
     except:
         pcast = str
         if v in ("F", "T"):
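
The recurring addition across the _is_valid implementations above is
an early directory check: BoxLib-style outputs are plotfile
directories, so anything that is not a directory can be rejected
before probing for Header or job_info. Distilled (a sketch, not the
full validation logic):

    import os

    def _is_valid_sketch(output_dir):
        # BoxLib-family datasets are directories containing a Header
        # file; reject plain files before any further probing.
        if not os.path.isdir(output_dir):
            return False
        return os.path.exists(os.path.join(output_dir, "Header"))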

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/frontends/boxlib/tests/test_orion.py
--- a/yt/frontends/boxlib/tests/test_orion.py
+++ b/yt/frontends/boxlib/tests/test_orion.py
@@ -42,3 +42,8 @@
     for test in small_patch_amr(rt, _fields):
         test_radtube.__name__ = test.description
         yield test
+
+
+@requires_file(rt)
+def test_OrionDataset():
+    assert isinstance(data_dir_load(rt), OrionDataset)

diff -r aa847dcf25e44f5aecc68949b8a3af6144b89707 -r f1bd4a3a0527102acf8ee12815648c843555afb4 yt/frontends/charm/api.py
--- a/yt/frontends/charm/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for yt.frontends.charm
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-      CharmGrid, \
-      CharmHierarchy, \
-      CharmStaticOutput
-
-from .fields import \
-      CharmFieldInfo, \
-      add_charm_field
-
-from .io import \
-      IOHandlerCharmHDF5

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/f254dd511c83/
Changeset:   f254dd511c83
Branch:      yt
User:        atmyers
Date:        2014-09-21 22:15:06+00:00
Summary:     changeset: 13843:b0658872803e
branch: yt
parent: 13763:b5403ca197e7
user: Andrew Myers <atmyers2 at gmail.com>
date: Tue Sep 02 20:58:12 2014 -0700
files: yt/__init__.py yt/visualization/api.py yt/visualization/particle_plotter.py
description:

reverting out particle plot changes for now
Affected #:  3 files

diff -r f1bd4a3a0527102acf8ee12815648c843555afb4 -r f254dd511c836dae1f313cc1963d0721dd6b19e9 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -143,7 +143,7 @@
     apply_colormap, scale_image, write_projection, \
     SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
     ProjectionPlot, OffAxisProjectionPlot, \
-    show_colormaps, ProfilePlot, PhasePlot, ParticlePlot
+    show_colormaps, ProfilePlot, PhasePlot
 
 from yt.visualization.volume_rendering.api import \
     off_axis_projection, ColorTransferFunction, \

diff -r f1bd4a3a0527102acf8ee12815648c843555afb4 -r f254dd511c836dae1f313cc1963d0721dd6b19e9 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -52,9 +52,6 @@
     ProfilePlot, \
     PhasePlot
 
-from .particle_plotter import \
-    ParticlePlot
-    
 from .base_plot_types import \
     get_multi_plot
 

diff -r f1bd4a3a0527102acf8ee12815648c843555afb4 -r f254dd511c836dae1f313cc1963d0721dd6b19e9 yt/visualization/particle_plotter.py
--- a/yt/visualization/particle_plotter.py
+++ /dev/null
@@ -1,370 +0,0 @@
-"""
-This is a simple mechanism for interfacing with Particle plots
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-
-import __builtin__
-import base64
-import types
-
-from functools import wraps
-from itertools import izip
-import matplotlib
-import numpy as np
-import cStringIO
-
-from yt.utilities.exceptions import \
-    YTNotInsideNotebook
-from yt.utilities.logger import ytLogger as mylog
-import _mpl_imports as mpl
-from yt.funcs import \
-    ensure_list, \
-    get_image_suffix, \
-    get_ipython_api_version
-from yt.units.unit_object import Unit
-from .profile_plotter import \
-    get_canvas, \
-    invalidate_plot, \
-    sanitize_label
-
-class ParticlePlot(object):
-    r"""
-    Create a particle scatter plot from a data source.
-
-    Given a data object (all_data, region, sphere, etc.), an x field, 
-    and a y field (both of particle type), this will create a scatter
-    plot with one marker for each particle.
-
-    Parameters
-    ----------
-    data_source : AMR3DData Object
-        The data object to be profiled, such as all_data, region, or 
-        sphere.
-    x_field : str
-        The field to plot on the x-axis.
-    y_fields : str
-        The field to plot on the y-axis.
-    plot_spec : dict or list of dicts
-        A dictionary or list of dictionaries containing plot keyword 
-        arguments.  This will be passed to pyplot.plot. 
-        For example, dict(c='r', marker='.').
-        Default: dict(c='b', marker='.', linestyle='None', markersize=8)
-
-    Examples
-    --------
-
-    >>> import yt
-    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-    >>> ad = ds.all_data()
-    >>> plot = yt.ParticlePlot(ad, 'particle_position_x', 'particle_velocity_x')
-    >>> plot.save()
-
-    Use set_line_property to change line properties.
-    
-    """
-    x_log = None
-    y_log = None
-    z_log = None
-    x_title = None
-    y_title = None
-    x_lim = (None, None)
-    y_lim = (None, None)
-    _plot_valid = False
-
-    def __init__(self, data_source, x_field, y_field,
-                 label=None, plot_spec=None):
-
-        if plot_spec is None:
-            plot_spec = {'c':'b', 'marker':'.', 'linestyle':'None', 'markersize':8}
-
-        self.data_source = data_source
-        self.x_field = x_field
-        self.y_field = y_field
-        self.label = sanitize_label(label, 1)
-        self.plot_spec = plot_spec
-
-        self.x_data = self.data_source[x_field]
-        self.y_data = self.data_source[y_field]
-        
-        self.figure = mpl.matplotlib.figure.Figure((10, 8))
-        self.axis = self.figure.add_subplot(111)
-        self._setup_plots()
-
-    def save(self, name=None):
-        r"""
-         Saves the scatter plot to disk.
-
-         Parameters
-         ----------
-         name : str
-             The output file keyword.
-
-         """
-        if not self._plot_valid:
-            self._setup_plots()
-        if name is None:
-            prefix = self.data_source.ds
-            name = "%s.png" % prefix
-        suffix = get_image_suffix(name)
-        prefix = name[:name.rfind(suffix)]
-        xfn = self.x_field
-        if isinstance(xfn, types.TupleType):
-            xfn = xfn[1]
-        yfn = self.y_field
-        if isinstance(yfn, types.TupleType):
-            yfn = yfn[1]
-        if not suffix:
-            suffix = ".png"
-        canvas_cls = get_canvas(name)
-        canvas = canvas_cls(self.figure)
-        fn = "%s_ScatterPlot_%s_%s%s" % (prefix, xfn, yfn, suffix)
-        mylog.info("Saving %s", fn)
-        canvas.print_figure(fn)
-        return fn
-
-    def show(self):
-        r"""This will send any the plot to the IPython notebook.
-
-        If yt is being run from within an IPython session, and it is able to
-        determine this, this function will send the plot to the
-        notebook for display.
-
-        If yt can't determine if it's inside an IPython session, it will raise
-        YTNotInsideNotebook.
-
-        Examples
-        --------
-
-        >>> import yt
-        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-        >>> pp = ParticlePlot(ds.all_data(), 'particle_position_x', 'particle_position_y')
-        >>> pp.show()
-
-        """
-        if "__IPYTHON__" in dir(__builtin__):
-            api_version = get_ipython_api_version()
-            if api_version in ('0.10', '0.11'):
-                self._send_zmq()
-            else:
-                from IPython.display import display
-                display(self)
-        else:
-            raise YTNotInsideNotebook
-
-    def _repr_html_(self):
-        """Return an html representation of the plot object. Will display as a
-        png for each WindowPlotMPL instance in self.plots"""
-        ret = ''
-        canvas = mpl.FigureCanvasAgg(self.figure)
-        f = cStringIO.StringIO()
-        canvas.print_figure(f)
-        f.seek(0)
-        img = base64.b64encode(f.read())
-        ret += '<img src="data:image/png;base64,%s"><br>' % img
-        return ret
-
-    def _setup_plots(self):
-        self.axis.cla()
-        self.axis.plot(np.array(self.x_data), np.array(self.y_data),
-                       label=self.label, **self.plot_spec)
-
-        xscale, yscale = self._get_axis_log()
-        xtitle, ytitle = self._get_axis_titles()
-
-        self.axis.set_xscale(xscale)
-        self.axis.set_yscale(yscale)
-
-        self.axis.set_xlabel(xtitle)
-        self.axis.set_ylabel(ytitle)
-
-        self.axis.set_xlim(*self.x_lim)
-        self.axis.set_ylim(*self.y_lim)
-
-        if any(self.label):
-            self.axis.legend(loc="best")
-
-        self._plot_valid = True
-
-    @invalidate_plot
-    def set_line_property(self, property, value):
-        r"""
-        Set properties for the line on the plot.
-
-        Parameters
-        ----------
-        property : str
-            The line property to be set.
-        value : str, int, float
-            The value to set for the line property.
-
-        Examples
-        --------
-
-        plot.set_line_property("marker", "+")
-
-        
-        """
-        specs = self.plot_spec
-        specs[property] = value
-        return self
-
-    @invalidate_plot
-    def set_xlog(self, log):
-        """set the x-axis to log or linear.
-
-        Parameters
-        ----------
-
-        log : boolean
-            Log on/off.
-        """
-        self.x_log = log
-        return self
-
-    @invalidate_plot
-    def set_ylog(self, log):
-        """set the y-axis to log or linear.
-
-        Parameters
-        ----------
-
-        log : boolean
-            Log on/off.
-        """
-        self.y_log = log
-        return self
-    
-
-    @invalidate_plot
-    def set_unit(self, field, unit):
-        """Sets a new unit for the requested field
-
-        Parameters
-        ----------
-        field : string
-           The name of the field that is to be changed.
-
-        new_unit : string or Unit object
-           The name of the new unit.
-        """
-        if field == self.x_field:
-            self.x_data.convert_to_units(unit)
-        elif field == self.y_field:
-            self.y_data.convert_to_units(unit)
-        else:
-            raise KeyError("Field %s not in the plot!" % (field))
-        return self
-
-    @invalidate_plot
-    def set_xlim(self, xmin=None, xmax=None):
-        """Sets the limits of the x field
-
-        Parameters
-        ----------
-        
-        xmin : float or None
-          The new x minimum.  Defaults to None, which leaves the xmin
-          unchanged.
-
-        xmax : float or None
-          The new x maximum.  Defaults to None, which leaves the xmax
-          unchanged.
-
-        Examples
-        --------
-
-        >>> import yt
-        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-        >>> pp = yt.ParticlePlot(ds.all_data(), 'particle_position_x', 'particle_velocity_x')
-        >>> pp.set_xlim(0.1, 0.9)
-        >>> pp.save()
-
-        """
-        self.x_lim = (xmin, xmax)
-        return self
-
-    @invalidate_plot
-    def set_ylim(self, ymin=None, ymax=None):
-        """Sets the limits for the y-axis of the plot.
-
-        Parameters
-        ----------
-
-        ymin : float or None
-          The new y minimum.  Defaults to None, which leaves the ymin
-          unchanged.
-
-        ymax : float or None
-          The new y maximum.  Defaults to None, which leaves the ymax
-          unchanged.
-
-        Examples
-        --------
-
-        >>> import yt
-        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-        >>> pp = yt.ParticlePlot(ds.all_data(), 'particle_position_x', 'particle_velocity_x')
-        >>> pp.set_ylim(1e1, 1e8)
-        >>> pp.save()
-
-        """
-        self.y_lim = (ymin, ymax)
-        return self
-
-    def _get_axis_log(self):
-
-        xf, = self.data_source._determine_fields([self.x_field])
-        xfi = self.data_source.ds._get_field_info(*xf)
-        if self.x_log is None:
-            x_log = xfi.take_log
-        else:
-            x_log = self.x_log
-
-        yf, = self.data_source._determine_fields([self.y_field])
-        yfi = self.data_source.ds._get_field_info(*yf)
-        if self.y_log is None:
-            y_log = yfi.take_log
-        else:
-            y_log = self.y_log
-        
-        scales = {True: 'log', False: 'linear'}
-        return scales[x_log], scales[y_log]
-
-    def _get_field_label(self, field, field_info, field_unit):
-        field_unit = field_unit.latex_representation()
-        field_name = field_info.display_name
-        if isinstance(field, tuple): field = field[1]
-        if field_name is None:
-            field_name = r'$\rm{'+field+r'}$'
-            field_name = r'$\rm{'+field.replace('_','\/').title()+r'}$'
-        elif field_name.find('$') == -1:
-            field_name = field_name.replace(' ','\/')
-            field_name = r'$\rm{'+field_name+r'}$'
-        if field_unit is None or field_unit == '' or field_unit == '1':
-            label = field_name
-        else:
-            label = field_name+r'$\/\/('+field_unit+r')$'
-        return label
-
-    def _get_axis_titles(self):
-
-        xfi = self.data_source.ds._get_field_info(self.x_field)
-        x_unit = Unit(self.x_data.units, registry=self.data_source.ds.unit_registry)
-        x_title = self._get_field_label(self.x_field, xfi, x_unit)
-
-        yfi = self.data_source.ds._get_field_info(self.y_field)
-        y_unit = Unit(self.y_data.units, registry=self.data_source.ds.unit_registry)
-        y_title = self._get_field_label(self.y_field, yfi, y_unit)
-
-        return (x_title, y_title)


https://bitbucket.org/yt_analysis/yt/commits/a83fab8aac28/
Changeset:   a83fab8aac28
Branch:      yt
User:        atmyers
Date:        2014-09-22 23:39:47+00:00
Summary:     pointing the velocity_field_20.fits answer test to the correct location
Affected #:  1 file

diff -r f254dd511c836dae1f313cc1963d0721dd6b19e9 -r a83fab8aac28b1281162198575f4a9333a9ee9cd yt/frontends/fits/tests/test_outputs.py
--- a/yt/frontends/fits/tests/test_outputs.py
+++ b/yt/frontends/fits/tests/test_outputs.py
@@ -33,7 +33,7 @@
 
 _fields_vels = ("velocity_x","velocity_y","velocity_z")
 
-vf = "UniformGrid/velocity_field_20.fits"
+vf = "UnigridData/velocity_field_20.fits"
 @requires_ds(vf)
 def test_velocity_field():
     ds = data_dir_load(vf, cls=FITSDataset)
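
Note that @requires_ds(vf) makes a wrong path fail silently rather
than loudly: when the file is missing from the test-data directory the
test is simply skipped, which is how the stale UniformGrid/ prefix
could go unnoticed. A simplified stand-in for that guard (an
assumption about its behavior, not yt's actual implementation):

    import functools, os

    def requires_ds_sketch(path):
        def decorator(func):
            if os.path.exists(path):
                return func
            @functools.wraps(func)
            def _skip(*args, **kwargs):
                return None  # yield no answer tests
            return _skip
        return decorator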


https://bitbucket.org/yt_analysis/yt/commits/ae5be84f21d1/
Changeset:   ae5be84f21d1
Branch:      yt
User:        atmyers
Date:        2014-09-22 23:43:14+00:00
Summary:     not sure how that got removed
Affected #:  1 file

diff -r a83fab8aac28b1281162198575f4a9333a9ee9cd -r ae5be84f21d1129b5718c8192f376e5ae9a97425 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -150,7 +150,7 @@
     TransferFunctionHelper
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    parallel_objects, enable_parallelism
+    parallel_objects, enable_parallelism, communication_system
 
 from yt.convenience import \
     load, simulation


https://bitbucket.org/yt_analysis/yt/commits/5ad64a6cc689/
Changeset:   5ad64a6cc689
Branch:      yt
User:        ngoldbaum
Date:        2014-09-23 16:59:50+00:00
Summary:     Merged in atmyers/yt (pull request #1220)

Restoring an answer test for the fits frontend
Affected #:  3 files

diff -r ea3ba359275aadaa72aeb5543bef652a6e364b16 -r 5ad64a6cc689da8686018c5d10d68a9ca67f10d2 yt/frontends/fits/tests/test_outputs.py
--- a/yt/frontends/fits/tests/test_outputs.py
+++ b/yt/frontends/fits/tests/test_outputs.py
@@ -33,7 +33,7 @@
 
 _fields_vels = ("velocity_x","velocity_y","velocity_z")
 
-vf = "UniformGrid/velocity_field_20.fits"
+vf = "UnigridData/velocity_field_20.fits"
 @requires_ds(vf)
 def test_velocity_field():
     ds = data_dir_load(vf, cls=FITSDataset)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
it because the commit notification service is enabled for the
recipient address of this email.

