[yt-svn] commit/yt: 33 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Mon Nov 16 11:15:58 PST 2015


33 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/a05e6a716571/
Changeset:   a05e6a716571
Branch:      yt
User:        ngoldbaum
Date:        2015-11-04 21:50:54+00:00
Summary:     Removing boolean data objects

These are currently untested, unused, and would take a lot of work to get
working again. Rather than leaving the dead code in the codebase, I am
removing it in the interest of code clarity. If someone wants to bring it
back, they can use the VCS history to restore the code.
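
For reference, one way to do that with Mercurial (a sketch; the revision hash
is the pre-removal parent revision taken from the diff headers below):

    hg cat -r f264885c3fb4 yt/data_objects/data_containers.py
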
Affected #:  4 files

diff -r f264885c3fb49044ba653bf1b29e2eb46e99db24 -r a05e6a7165714d6b4e4e049dc1310c77a7adc30c yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1467,167 +1467,3 @@
     obj = cls(*new_args)
     obj.field_parameters.update(field_parameters)
     return ReconstructedObject((ds, obj))
-
-class YTBooleanRegionBase(YTSelectionContainer3D):
-    """
-    This will build a hybrid region based on the boolean logic
-    of the regions.
-
-    Parameters
-    ----------
-    regions : list
-        A list of region objects and strings describing the boolean logic
-        to use when building the hybrid region. The boolean logic can be
-        nested using parentheses.
-
-    Examples
-    --------
-    >>> re1 = ds.region([0.5, 0.5, 0.5], [0.4, 0.4, 0.4],
-        [0.6, 0.6, 0.6])
-    >>> re2 = ds.region([0.5, 0.5, 0.5], [0.45, 0.45, 0.45],
-        [0.55, 0.55, 0.55])
-    >>> sp1 = ds.sphere([0.575, 0.575, 0.575], .03)
-    >>> toroid_shape = ds.boolean([re1, "NOT", re2])
-    >>> toroid_shape_with_hole = ds.boolean([re1, "NOT", "(", re2, "OR",
-        sp1, ")"])
-    """
-    _type_name = "boolean"
-    _con_args = ("regions",)
-    def __init__(self, regions, fields = None, ds = None, field_parameters = None, data_source = None):
-        # Center is meaningless, but we'll define it all the same.
-        YTSelectionContainer3D.__init__(self, [0.5]*3, fields, ds, field_parameters, data_source)
-        self.regions = regions
-        self._all_regions = []
-        self._some_overlap = []
-        self._all_overlap = []
-        self._cut_masks = {}
-        self._get_all_regions()
-        self._make_overlaps()
-        self._get_list_of_grids()
-
-    def _get_all_regions(self):
-        # Before anything else, find the unique set of regions involved in
-        # this process.
-        for item in self.regions:
-            if isinstance(item, bytes): continue
-            self._all_regions.append(item)
-            # So cut_masks don't get messed up.
-            item._boolean_touched = True
-        self._all_regions = np.unique(self._all_regions)
-
-    def _make_overlaps(self):
-        # Using the processed cut_masks, we'll figure out what grids
-        # are left in the hybrid region.
-        pbar = get_pbar("Building boolean", len(self._all_regions))
-        for i, region in enumerate(self._all_regions):
-            try:
-                region._get_list_of_grids() # This is no longer supported.
-                alias = region
-            except AttributeError:
-                alias = region.data         # This is no longer supported.
-            for grid in alias._grids:
-                if grid in self._some_overlap or grid in self._all_overlap:
-                    continue
-                # Get the cut_mask for this grid in this region, and see
-                # if there's any overlap with the overall cut_mask.
-                overall = self._get_cut_mask(grid)
-                local = force_array(alias._get_cut_mask(grid),
-                    grid.ActiveDimensions)
-                # Below we don't want to match empty masks.
-                if overall.sum() == 0 and local.sum() == 0: continue
-                # The whole grid is in the hybrid region if a) its cut_mask
-                # in the original region is identical to the new one and b)
-                # the original region cut_mask is all ones.
-                if (local == np.bitwise_and(overall, local)).all() and \
-                        (local == True).all():
-                    self._all_overlap.append(grid)
-                    continue
-                if (overall == local).any():
-                    # Some of local is in overall
-                    self._some_overlap.append(grid)
-                    continue
-            pbar.update(i)
-        pbar.finish()
-
-    def __repr__(self):
-        # We'll do this the slow way to be clear what's going on
-        s = "%s (%s): " % (self.__class__.__name__, self.ds)
-        s += "["
-        for i, region in enumerate(self.regions):
-            if region in ["OR", "AND", "NOT", "(", ")"]:
-                s += region
-            else:
-                s += region.__repr__()
-            if i < (len(self.regions) - 1): s += ", "
-        s += "]"
-        return s
-
-    def _is_fully_enclosed(self, grid):
-        return (grid in self._all_overlap)
-
-    def _get_list_of_grids(self):
-        self._grids = np.array(self._some_overlap + self._all_overlap,
-            dtype='object')
-
-    def _get_cut_mask(self, grid, field=None):
-        if self._is_fully_enclosed(grid):
-            return True # We do not want child masking here
-        if grid.id in self._cut_masks:
-            return self._cut_masks[grid.id]
-        # If we get this far, we have to generate the cut_mask.
-        return self._get_level_mask(self.regions, grid)
-
-    def _get_level_mask(self, ops, grid):
-        level_masks = []
-        end = 0
-        for i, item in enumerate(ops):
-            if end > 0 and i < end:
-                # We skip over things inside parentheses on this level.
-                continue
-            if isinstance(item, YTDataContainer):
-                # Add this region's cut_mask to level_masks
-                level_masks.append(force_array(item._get_cut_mask(grid),
-                    grid.ActiveDimensions))
-            elif item == "AND" or item == "NOT" or item == "OR":
-                level_masks.append(item)
-            elif item == "(":
-                # recurse down, and we'll append the results, which
-                # should be a single cut_mask
-                open_count = 0
-                for ii, item in enumerate(ops[i + 1:]):
-                    # We look for the matching closing parentheses to find
-                    # where we slice ops.
-                    if item == "(":
-                        open_count += 1
-                    if item == ")" and open_count > 0:
-                        open_count -= 1
-                    elif item == ")" and open_count == 0:
-                        end = i + ii + 1
-                        break
-                level_masks.append(force_array(self._get_level_mask(ops[i + 1:end],
-                    grid), grid.ActiveDimensions))
-                end += 1
-            elif isinstance(item.data, AMRData):
-                level_masks.append(force_array(item.data._get_cut_mask(grid),
-                    grid.ActiveDimensions))
-            else:
-                mylog.error("Item in the boolean construction unidentified.")
-        # Now we do the logic on our level_mask.
-        # There should be no nested logic anymore.
-        # The first item should be a cut_mask,
-        # so that will be our starting point.
-        this_cut_mask = level_masks[0]
-        for i, item in enumerate(level_masks):
-            # I could use a slice above, but I'll keep i consistent instead.
-            if i == 0: continue
-            if item == "AND":
-                # So, the next item in level_masks we want to AND.
-                np.bitwise_and(this_cut_mask, level_masks[i+1], this_cut_mask)
-            if item == "NOT":
-                # It's convenient to remember that NOT == AND NOT
-                np.bitwise_and(this_cut_mask, np.invert(level_masks[i+1]),
-                    this_cut_mask)
-            if item == "OR":
-                np.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
-        self._cut_masks[grid.id] = this_cut_mask
-        return this_cut_mask

diff -r f264885c3fb49044ba653bf1b29e2eb46e99db24 -r a05e6a7165714d6b4e4e049dc1310c77a7adc30c yt/data_objects/tests/test_boolean_regions.py
--- a/yt/data_objects/tests/test_boolean_regions.py
+++ /dev/null
@@ -1,361 +0,0 @@
-from yt.testing import *
-from yt.fields.local_fields import add_field
-from yt.units.yt_array import YTArray
-
-def setup():
-    return
-    from yt.config import ytcfg
-    ytcfg["yt","__withintesting"] = "True"
-    def _ID(field, data):
-        width = data.ds.domain_right_edge - data.ds.domain_left_edge
-        min_dx = YTArray(1.0/8192, input_units='code_length',
-                         registry=data.ds.unit_registry)
-        delta = width / min_dx
-        x = data['x'] - min_dx / 2.
-        y = data['y'] - min_dx / 2.
-        z = data['z'] - min_dx / 2.
-        xi = x / min_dx
-        yi = y / min_dx
-        zi = z / min_dx
-        index = xi + delta[0] * (yi + delta[1] * zi)
-        index = index.astype('int64')
-        return index
-
-    add_field("ID", function=_ID)
-
-def test_boolean_spheres_no_overlap():
-    r"""Test to make sure that boolean objects (spheres, no overlap)
-    behave the way we expect.
-
-    Test non-overlapping spheres. This also checks that the original spheres
-    don't change as part of constructing the booleans.
-    """
-    return
-    for n in [1, 2, 4, 8]:
-        ds = fake_random_ds(64, nprocs=n)
-        ds.index
-        sp1 = ds.sphere([0.25, 0.25, 0.25], 0.15)
-        sp2 = ds.sphere([0.75, 0.75, 0.75], 0.15)
-        # Store the original indices
-        i1 = sp1['ID']
-        i1.sort()
-        i2 = sp2['ID']
-        i2.sort()
-        ii = np.concatenate((i1, i2))
-        ii.sort()
-        # Make some booleans
-        bo1 = ds.boolean([sp1, "AND", sp2]) # empty
-        bo2 = ds.boolean([sp1, "NOT", sp2]) # only sp1
-        bo3 = ds.boolean([sp1, "OR", sp2]) # combination
-        # This makes sure the original containers didn't change.
-        new_i1 = sp1['ID']
-        new_i1.sort()
-        new_i2 = sp2['ID']
-        new_i2.sort()
-        yield assert_array_equal, new_i1, i1
-        yield assert_array_equal, new_i2, i2
-        # Now make sure the indices also behave as we expect.
-        empty = np.array([])
-        yield assert_array_equal, bo1['ID'], empty
-        b2 = bo2['ID']
-        b2.sort()
-        yield assert_array_equal, b2, i1
-        b3 = bo3['ID']
-        b3.sort()
-        yield assert_array_equal, b3, ii
- 
-def test_boolean_spheres_overlap():
-    r"""Test to make sure that boolean objects (spheres, overlap)
-    behave the way we expect.
-
-    Test overlapping spheres.
-    """
-    return
-    for n in [1, 2, 4, 8]:
-        ds = fake_random_ds(64, nprocs=n)
-        ds.index
-        sp1 = ds.sphere([0.45, 0.45, 0.45], 0.15)
-        sp2 = ds.sphere([0.55, 0.55, 0.55], 0.15)
-        # Get indices of both.
-        i1 = sp1['ID']
-        i2 = sp2['ID']
-        # Make some booleans
-        bo1 = ds.boolean([sp1, "AND", sp2]) # overlap (a lens)
-        bo2 = ds.boolean([sp1, "NOT", sp2]) # sp1 - sp2 (sphere with bite)
-        bo3 = ds.boolean([sp1, "OR", sp2]) # combination (H2)
-        # Now make sure the indices also behave as we expect.
-        lens = np.intersect1d(i1, i2)
-        apple = np.setdiff1d(i1, i2)
-        both = np.union1d(i1, i2)
-        b1 = bo1['ID']
-        b1.sort()
-        b2 = bo2['ID']
-        b2.sort()
-        b3 = bo3['ID']
-        b3.sort()
-        yield assert_array_equal, b1, lens
-        yield assert_array_equal, b2, apple
-        yield assert_array_equal, b3, both
-
-def test_boolean_regions_no_overlap():
-    r"""Test to make sure that boolean objects (regions, no overlap)
-    behave the way we expect.
-
-    Test non-overlapping regions. This also checks that the original regions
-    don't change as part of constructing the booleans.
-    """
-    return
-    for n in [1, 2, 4, 8]:
-        ds = fake_random_ds(64, nprocs=n)
-        ds.index
-        re1 = ds.region([0.25]*3, [0.2]*3, [0.3]*3)
-        re2 = ds.region([0.65]*3, [0.6]*3, [0.7]*3)
-        # Store the original indices
-        i1 = re1['ID']
-        i1.sort()
-        i2 = re2['ID']
-        i2.sort()
-        ii = np.concatenate((i1, i2))
-        ii.sort()
-        # Make some booleans
-        bo1 = ds.boolean([re1, "AND", re2]) # empty
-        bo2 = ds.boolean([re1, "NOT", re2]) # only re1
-        bo3 = ds.boolean([re1, "OR", re2]) # combination
-        # This makes sure the original containers didn't change.
-        new_i1 = re1['ID']
-        new_i1.sort()
-        new_i2 = re2['ID']
-        new_i2.sort()
-        yield assert_array_equal, new_i1, i1
-        yield assert_array_equal, new_i2, i2
-        # Now make sure the indices also behave as we expect.
-        empty = np.array([])
-        yield assert_array_equal, bo1['ID'], empty
-        b2 = bo2['ID']
-        b2.sort()
-        yield assert_array_equal, b2, i1 
-        b3 = bo3['ID']
-        b3.sort()
-        yield assert_array_equal, b3, ii
-
-def test_boolean_regions_overlap():
-    r"""Test to make sure that boolean objects (regions, overlap)
-    behave the way we expect.
-
-    Test overlapping regions.
-    """
-    return
-    for n in [1, 2, 4, 8]:
-        ds = fake_random_ds(64, nprocs=n)
-        ds.index
-        re1 = ds.region([0.55]*3, [0.5]*3, [0.6]*3)
-        re2 = ds.region([0.6]*3, [0.55]*3, [0.65]*3)
-        # Get indices of both.
-        i1 = re1['ID']
-        i2 = re2['ID']
-        # Make some booleans
-        bo1 = ds.boolean([re1, "AND", re2]) # overlap (small cube)
-        bo2 = ds.boolean([re1, "NOT", re2]) # re1 - re2 (large cube with bite)
-        bo3 = ds.boolean([re1, "OR", re2]) # combination (merged large cubes)
-        # Now make sure the indices also behave as we expect.
-        cube = np.intersect1d(i1, i2)
-        bite_cube = np.setdiff1d(i1, i2)
-        both = np.union1d(i1, i2)
-        b1 = bo1['ID']
-        b1.sort()
-        b2 = bo2['ID']
-        b2.sort()
-        b3 = bo3['ID']
-        b3.sort()
-        yield assert_array_equal, b1, cube
-        yield assert_array_equal, b2, bite_cube
-        yield assert_array_equal, b3, both
-
-def test_boolean_cylinders_no_overlap():
-    r"""Test to make sure that boolean objects (cylinders, no overlap)
-    behave the way we expect.
-
-    Test non-overlapping cylinders. This also checks that the original cylinders
-    don't change as part of constructing the booleans.
-    """
-    return
-    for n in [1, 2, 4, 8]:
-        ds = fake_random_ds(64, nprocs=n)
-        ds.index
-        cyl1 = ds.disk([0.25]*3, [1, 0, 0], 0.1, 0.1)
-        cyl2 = ds.disk([0.75]*3, [1, 0, 0], 0.1, 0.1)
-        # Store the original indices
-        i1 = cyl1['ID']
-        i1.sort()
-        i2 = cyl2['ID']
-        i2.sort()
-        ii = np.concatenate((i1, i2))
-        ii.sort()
-        # Make some booleans
-        bo1 = ds.boolean([cyl1, "AND", cyl2]) # empty
-        bo2 = ds.boolean([cyl1, "NOT", cyl2]) # only cyl1
-        bo3 = ds.boolean([cyl1, "OR", cyl2]) # combination
-        # This makes sure the original containers didn't change.
-        new_i1 = cyl1['ID']
-        new_i1.sort()
-        new_i2 = cyl2['ID']
-        new_i2.sort()
-        yield assert_array_equal, new_i1, i1
-        yield assert_array_equal, new_i2, i2
-        # Now make sure the indices also behave as we expect.
-        empty = np.array([])
-        yield assert_array_equal, bo1['ID'], empty
-        b2 = bo2['ID']
-        b2.sort()
-        yield assert_array_equal, b2, i1
-        b3 = bo3['ID']
-        b3.sort()
-        yield assert_array_equal, b3, ii
-
-def test_boolean_cylinders_overlap():
-    r"""Test to make sure that boolean objects (cylinders, overlap)
-    behave the way we expect.
-
-    Test overlapping cylinders.
-    """
-    return
-    for n in [1, 2, 4, 8]:
-        ds = fake_random_ds(64, nprocs=n)
-        ds.index
-        cyl1 = ds.disk([0.45]*3, [1, 0, 0], 0.2, 0.2)
-        cyl2 = ds.disk([0.55]*3, [1, 0, 0], 0.2, 0.2)
-        # Get indices of both.
-        i1 = cyl1['ID']
-        i2 = cyl2['ID']
-        # Make some booleans
-        bo1 = ds.boolean([cyl1, "AND", cyl2]) # overlap (vertically extended lens)
-        bo2 = ds.boolean([cyl1, "NOT", cyl2]) # cyl1 - cyl2 (disk minus a bite)
-        bo3 = ds.boolean([cyl1, "OR", cyl2]) # combination (merged disks)
-        # Now make sure the indices also behave as we expect.
-        vlens = np.intersect1d(i1, i2)
-        bite_disk = np.setdiff1d(i1, i2)
-        both = np.union1d(i1, i2)
-        b1 = bo1['ID']
-        b1.sort()
-        b2 = bo2['ID']
-        b2.sort()
-        b3 = bo3['ID']
-        b3.sort()
-        yield assert_array_equal, b1, vlens
-        yield assert_array_equal, b2, bite_disk
-        yield assert_array_equal, b3, both
-
-def test_boolean_ellipsoids_no_overlap():
-    r"""Test to make sure that boolean objects (ellipsoids, no overlap)
-    behave the way we expect.
-
-    Test non-overlapping ellipsoids. This also checks that the original
-    ellipsoids don't change as part of constructing the booleans.
-    """
-    return
-    for n in [1, 2, 4, 8]:
-        ds = fake_random_ds(64, nprocs=n)
-        ds.index
-        ell1 = ds.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
-        ell2 = ds.ellipsoid([0.75]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
-        # Store the original indices
-        i1 = ell1['ID']
-        i1.sort()
-        i2 = ell2['ID']
-        i2.sort()
-        ii = np.concatenate((i1, i2))
-        ii.sort()
-        # Make some booleans
-        bo1 = ds.boolean([ell1, "AND", ell2]) # empty
-        bo2 = ds.boolean([ell1, "NOT", ell2]) # only ell1
-        bo3 = ds.boolean([ell1, "OR", ell2]) # combination
-        # This makes sure the original containers didn't change.
-        new_i1 = ell1['ID']
-        new_i1.sort()
-        new_i2 = ell2['ID']
-        new_i2.sort()
-        yield assert_array_equal, new_i1, i1 
-        yield assert_array_equal, new_i2, i2
-        # Now make sure the indices also behave as we expect.
-        empty = np.array([])
-        yield assert_array_equal, bo1['ID'], empty
-        b2 = bo2['ID']
-        b2.sort()
-        yield assert_array_equal, b2, i1
-        b3 = bo3['ID']
-        b3.sort()
-        yield assert_array_equal, b3, ii
-
-def test_boolean_ellipsoids_overlap():
-    r"""Test to make sure that boolean objects (ellipsoids, overlap)
-    behave the way we expect.
-
-    Test overlapping ellipsoids.
-    """
-    return
-    for n in [1, 2, 4, 8]:
-        ds = fake_random_ds(64, nprocs=n)
-        ds.index
-        ell1 = ds.ellipsoid([0.45]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
-        ell2 = ds.ellipsoid([0.55]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
-        # Get indices of both.
-        i1 = ell1['ID']
-        i2 = ell2['ID']
-        # Make some booleans
-        bo1 = ds.boolean([ell1, "AND", ell2]) # overlap
-        bo2 = ds.boolean([ell1, "NOT", ell2]) # ell1 - ell2
-        bo3 = ds.boolean([ell1, "OR", ell2]) # combination
-        # Now make sure the indices also behave as we expect.
-        overlap = np.intersect1d(i1, i2)
-        diff = np.setdiff1d(i1, i2)
-        both = np.union1d(i1, i2)
-        b1 = bo1['ID']
-        b1.sort()
-        b2 = bo2['ID']
-        b2.sort()
-        b3 = bo3['ID']
-        b3.sort()
-        yield assert_array_equal, b1, overlap
-        yield assert_array_equal, b2, diff
-        yield assert_array_equal, b3, both
-
-def test_boolean_mix_periodicity():
-    r"""Test that a hybrid boolean region behaves as we expect.
-
-    This also tests nested logic and that periodicity works.
-    """
-    return
-    for n in [1, 2, 4, 8]:
-        ds = fake_random_ds(64, nprocs=n)
-        ds.index
-        re = ds.region([0.5]*3, [0.0]*3, [1]*3) # whole thing
-        sp = ds.sphere([0.95]*3, 0.3) # wraps around
-        cyl = ds.disk([0.05]*3, [1,1,1], 0.1, 0.4) # wraps around
-        # Get original indices
-        rei = re['ID']
-        spi = sp['ID']
-        cyli = cyl['ID']
-        # Make some booleans
-        # whole box minus spherical bites at corners
-        bo1 = ds.boolean([re, "NOT", sp])
-        # sphere plus cylinder
-        bo2 = ds.boolean([sp, "OR", cyl])
-        # a jumble, the region minus the sp+cyl
-        bo3 = ds.boolean([re, "NOT", "(", sp, "OR", cyl, ")"])
-        # Now make sure the indices also behave as we expect.
-        expect = np.setdiff1d(rei, spi)
-        ii = bo1['ID']
-        ii.sort()
-        yield assert_array_equal, expect, ii
-        #
-        expect = np.union1d(spi, cyli)
-        ii = bo2['ID']
-        ii.sort()
-        yield assert_array_equal, expect, ii
-        #
-        expect = np.union1d(spi, cyli)
-        expect = np.setdiff1d(rei, expect)
-        ii = bo3['ID']
-        ii.sort()
-        yield assert_array_equal, expect, ii
-

diff -r f264885c3fb49044ba653bf1b29e2eb46e99db24 -r a05e6a7165714d6b4e4e049dc1310c77a7adc30c yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -35,14 +35,6 @@
     TestHaloCompositionHashFOF, \
     TestHaloCompositionHashPHOP
 
-from .boolean_region_tests import \
-    TestBooleanANDGridQuantity, \
-    TestBooleanORGridQuantity, \
-    TestBooleanNOTGridQuantity, \
-    TestBooleanANDParticleQuantity, \
-    TestBooleanORParticleQuantity, \
-    TestBooleanNOTParticleQuantity
-
 try:
     from .framework import AnswerTesting
 except ImportError:

diff -r f264885c3fb49044ba653bf1b29e2eb46e99db24 -r a05e6a7165714d6b4e4e049dc1310c77a7adc30c yt/utilities/answer_testing/boolean_region_tests.py
--- a/yt/utilities/answer_testing/boolean_region_tests.py
+++ /dev/null
@@ -1,166 +0,0 @@
-from __future__ import absolute_import
-from yt.mods import *
-import matplotlib
-import pylab
-from .output_tests import SingleOutputTest, YTDatasetTest, create_test
-import hashlib
-import numpy as np
-
-# Tests to make sure that grid quantities are identical that should
-# be identical for the AND operator.
-class TestBooleanANDGridQuantity(YTDatasetTest):
-    def run(self):
-        domain = self.ds.domain_right_edge - self.ds.domain_left_edge
-        four = 0.4 * domain + self.ds.domain_left_edge
-        five = 0.5 * domain + self.ds.domain_left_edge
-        six = 0.6 * domain + self.ds.domain_left_edge
-        re1 = self.ds.region(five, four, six)
-        re2 = self.ds.region(five, five, six)
-        re = self.ds.boolean([re1, "AND", re2])
-        # re should look like re2.
-        x2 = re2['x']
-        x = re['x']
-        x2 = x2[x2.argsort()]
-        x = x[x.argsort()]
-        self.result = (x2, x)
-    
-    def compare(self, old_result):
-        self.compare_array_delta(self.result[0], self.result[1], 1e-10)
-    
-    def plot(self):
-        return []
-
-# OR
-class TestBooleanORGridQuantity(YTDatasetTest):
-    def run(self):
-        domain = self.ds.domain_right_edge - self.ds.domain_left_edge
-        four = 0.4 * domain + self.ds.domain_left_edge
-        five = 0.5 * domain + self.ds.domain_left_edge
-        six = 0.6 * domain + self.ds.domain_left_edge
-        re1 = self.ds.region(five, four, six)
-        re2 = self.ds.region(five, five, six)
-        re = self.ds.boolean([re1, "OR", re2])
-        # re should look like re1
-        x1 = re1['x']
-        x = re['x']
-        x1 = x1[x1.argsort()]
-        x = x[x.argsort()]
-        self.result = (x1, x)
-    
-    def compare(self, old_result):
-        self.compare_array_delta(self.result[0], self.result[1], 1e-10)
-    
-    def plot(self):
-        return []
-
-# NOT
-class TestBooleanNOTGridQuantity(YTDatasetTest):
-    def run(self):
-        domain = self.ds.domain_right_edge - self.ds.domain_left_edge
-        four = 0.4 * domain + self.ds.domain_left_edge
-        five = 0.5 * domain + self.ds.domain_left_edge
-        six = 0.6 * domain + self.ds.domain_left_edge
-        re1 = self.ds.region(five, four, six)
-        re2 = self.ds.region(five, five, six)
-        # Bottom base
-        re3 = self.ds.region(five, four, [six[0], six[1], five[2]])
-        # Side
-        re4 = self.ds.region(five, [four[0], four[1], five[2]],
-            [five[0], six[1], six[2]])
-        # Last small cube
-        re5 = self.ds.region(five, [five[0], four[0], four[2]],
-            [six[0], five[1], six[2]])
-        # re1 NOT re2 should look like re3 OR re4 OR re5
-        re = self.ds.boolean([re1, "NOT", re2])
-        reo = self.ds.boolean([re3, "OR", re4, "OR", re5])
-        x = re['x']
-        xo = reo['x']
-        x = x[x.argsort()]
-        xo = xo[xo.argsort()]
-        self.result = (x, xo)
-    
-    def compare(self, old_result):
-        self.compare_array_delta(self.result[0], self.result[1], 1e-10)
-    
-    def plot(self):
-        return []
-
-# Tests to make sure that particle quantities are identical that should
-# be identical for the AND operator.
-class TestBooleanANDParticleQuantity(YTDatasetTest):
-    def run(self):
-        domain = self.ds.domain_right_edge - self.ds.domain_left_edge
-        four = 0.4 * domain + self.ds.domain_left_edge
-        five = 0.5 * domain + self.ds.domain_left_edge
-        six = 0.6 * domain + self.ds.domain_left_edge
-        re1 = self.ds.region(five, four, six)
-        re2 = self.ds.region(five, five, six)
-        re = self.ds.boolean([re1, "AND", re2])
-        # re should look like re2.
-        x2 = re2['particle_position_x']
-        x = re['particle_position_x']
-        x2 = x2[x2.argsort()]
-        x = x[x.argsort()]
-        self.result = (x2, x)
-    
-    def compare(self, old_result):
-        self.compare_array_delta(self.result[0], self.result[1], 1e-10)
-    
-    def plot(self):
-        return []
-
-# OR
-class TestBooleanORParticleQuantity(YTDatasetTest):
-    def run(self):
-        domain = self.ds.domain_right_edge - self.ds.domain_left_edge
-        four = 0.4 * domain + self.ds.domain_left_edge
-        five = 0.5 * domain + self.ds.domain_left_edge
-        six = 0.6 * domain + self.ds.domain_left_edge
-        re1 = self.ds.region(five, four, six)
-        re2 = self.ds.region(five, five, six)
-        re = self.ds.boolean([re1, "OR", re2])
-        # re should look like re1
-        x1 = re1['particle_position_x']
-        x = re['particle_position_x']
-        x1 = x1[x1.argsort()]
-        x = x[x.argsort()]
-        self.result = (x1, x)
-    
-    def compare(self, old_result):
-        self.compare_array_delta(self.result[0], self.result[1], 1e-10)
-    
-    def plot(self):
-        return []
-
-# NOT
-class TestBooleanNOTParticleQuantity(YTDatasetTest):
-    def run(self):
-        domain = self.ds.domain_right_edge - self.ds.domain_left_edge
-        four = 0.4 * domain + self.ds.domain_left_edge
-        five = 0.5 * domain + self.ds.domain_left_edge
-        six = 0.6 * domain + self.ds.domain_left_edge
-        re1 = self.ds.region(five, four, six)
-        re2 = self.ds.region(five, five, six)
-        # Bottom base
-        re3 = self.ds.region(five, four, [six[0], six[1], five[2]])
-        # Side
-        re4 = self.ds.region(five, [four[0], four[1], five[2]],
-            [five[0], six[1], six[2]])
-        # Last small cube
-        re5 = self.ds.region(five, [five[0], four[0], four[2]],
-            [six[0], five[1], six[2]])
-        # re1 NOT re2 should look like re3 OR re4 OR re5
-        re = self.ds.boolean([re1, "NOT", re2])
-        reo = self.ds.boolean([re3, "OR", re4, "OR", re5])
-        x = re['particle_position_x']
-        xo = reo['particle_position_x']
-        x = x[x.argsort()]
-        xo = xo[xo.argsort()]
-        self.result = (x, xo)
-    
-    def compare(self, old_result):
-        self.compare_array_delta(self.result[0], self.result[1], 1e-10)
-    
-    def plot(self):
-        return []
-


https://bitbucket.org/yt_analysis/yt/commits/84f2a7849d3d/
Changeset:   84f2a7849d3d
Branch:      yt
User:        ngoldbaum
Date:        2015-11-04 21:56:29+00:00
Summary:     Removing BinnedProfile code

This code is untested, deprecated, and known to no longer function correctly
under yt-3. Rather than confusing users by still allowing them to import it,
I'm removing it completely here. People should use create_profile and the
ProfileND subclasses instead.
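
For reference, a minimal sketch of the replacement API (the sample dataset and
field names are illustrative, not taken from this changeset):

    >>> import yt
    >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    >>> sp = ds.sphere("c", (10, "kpc"))
    >>> prof = yt.create_profile(sp, "density", ["temperature"],
    ...     weight_field="cell_mass")
    >>> prof["temperature"]

create_profile bins the sphere's cells by density and returns the
cell-mass-weighted average temperature in each bin, covering the same use
case as BinnedProfile1D.add_fields.
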
Affected #:  2 files

diff -r a05e6a7165714d6b4e4e049dc1310c77a7adc30c -r 84f2a7849d3d0c74ad581109954fce6230d820d9 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -24,8 +24,8 @@
 from yt.units.unit_object import Unit
 from yt.data_objects.data_containers import YTFieldData
 from yt.utilities.lib.misc_utilities import \
-    bin_profile1d, bin_profile2d, bin_profile3d, \
-    new_bin_profile1d, new_bin_profile2d, \
+    new_bin_profile1d, \
+    new_bin_profile2d, \
     new_bin_profile3d
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_objects
@@ -51,704 +51,6 @@
         return tr
     return save_state
 
-# Note we do not inherit from EnzoData.
-# We could, but I think we instead want to deal with the root datasource.
-class BinnedProfile(ParallelAnalysisInterface):
-    def __init__(self, data_source):
-        ParallelAnalysisInterface.__init__(self)
-        self._data_source = data_source
-        self.ds = data_source.ds
-        self.field_data = YTFieldData()
-
-    @property
-    def index(self):
-        return self.ds.index
-
-    def _get_dependencies(self, fields):
-        return ParallelAnalysisInterface._get_dependencies(
-                    self, fields + self._get_bin_fields())
-
-    def add_fields(self, fields, weight = "cell_mass", accumulation = False, fractional=False):
-        """
-        We accept a list of *fields* which will be binned if *weight* is not
-        None and otherwise summed.  *accumulation* determines whether or not
-        they will be accumulated from low to high along the appropriate axes.
-        """
-        # Note that the specification has to be the same for all of these
-        fields = ensure_list(fields)
-        data = {}         # final results will go here
-        weight_data = {}  # we need to track the weights as we go
-        std_data = {}
-        for field in fields:
-            data[field] = self._get_empty_field()
-            weight_data[field] = self._get_empty_field()
-            std_data[field] = self._get_empty_field()
-        used = self._get_empty_field().astype('bool')
-        chunk_fields = fields[:]
-        if weight is not None: chunk_fields += [weight]
-        #pbar = get_pbar('Binning grids', len(self._data_source._grids))
-        for ds in self._data_source.chunks(chunk_fields, chunking_style = "io"):
-            try:
-                args = self._get_bins(ds, check_cut=True)
-            except YTEmptyProfileData:
-                # No bins returned for this grid, so forget it!
-                continue
-            for field in fields:
-                # We get back field values, weight values, used bins
-                f, w, q, u = self._bin_field(ds, field, weight, accumulation,
-                                          args=args, check_cut=True)
-                data[field] += f        # running total
-                weight_data[field] += w # running total
-                used |= u       # running 'or'
-                std_data[field][u] += w[u] * (q[u]/w[u] + \
-                    (f[u]/w[u] -
-                     data[field][u]/weight_data[field][u])**2) # running total
-        for key in data:
-            data[key] = self.comm.mpi_allreduce(data[key], op='sum')
-        for key in weight_data:
-            weight_data[key] = self.comm.mpi_allreduce(weight_data[key], op='sum')
-        used = self.comm.mpi_allreduce(used, op='sum')
-        # When the loop completes the parallel finalizer gets called
-        #pbar.finish()
-        ub = np.where(used)
-        for field in fields:
-            if weight: # Now, at the end, we divide out.
-                data[field][ub] /= weight_data[field][ub]
-                std_data[field][ub] /= weight_data[field][ub]
-            self[field] = data[field]
-            self["%s_std" % field] = np.sqrt(std_data[field])
-        self["UsedBins"] = used
-
-        if fractional:
-            for field in fields:
-                self.field_data[field] /= self.field_data[field].sum()
-
-    def keys(self):
-        return self.field_data.keys()
-
-    def __getitem__(self, key):
-        # This raises a KeyError if it doesn't exist
-        # This is because we explicitly want to add all fields
-        return self.field_data[key]
-
-    def __setitem__(self, key, value):
-        self.field_data[key] = value
-
-    def _get_field(self, source, field, check_cut):
-        # This is where we will iterate to get all contributions to a field
-        # which is how we will implement hybrid particle/cell fields
-        # but...  we default to just the field.
-        data = []
-        data.append(source[field].astype('float64'))
-        return uconcatenate(data, axis=0)
-
-    def _fix_pickle(self):
-        if isinstance(self._data_source, tuple):
-            self._data_source = self._data_source[1]
-
-# @todo: Fix accumulation with overriding
-class BinnedProfile1D(BinnedProfile):
-    """
-    A 'Profile' produces either a weighted (or unweighted) average or a
-    straight sum of a field in a bin defined by another field.  In the case
-    of a weighted average, we have: p_i = sum( w_i * v_i ) / sum(w_i)
-
-    We accept a *data_source*, which will be binned into *n_bins*
-    by the field *bin_field* between the *lower_bound* and the
-    *upper_bound*.  These bins may or may not be equally divided
-    in *log_space*, and the *lazy_reader* flag controls whether we
-    use a memory conservative approach. If *end_collect* is True,
-    take all values outside the given bounds and store them in the
-    0 and *n_bins*-1 values.
-    """
-    def __init__(self, data_source, n_bins, bin_field,
-                 lower_bound, upper_bound,
-                 log_space = True,
-                 end_collect=False):
-        BinnedProfile.__init__(self, data_source)
-        self.bin_field = bin_field
-        self._x_log = log_space
-        self.end_collect = end_collect
-        self.n_bins = n_bins
-
-        # Get our bins
-        if log_space:
-            if lower_bound <= 0.0 or upper_bound <= 0.0:
-                raise YTIllDefinedBounds(lower_bound, upper_bound)
-            func = np.logspace
-            lower_bound, upper_bound = np.log10(lower_bound), np.log10(upper_bound)
-        else:
-            func = np.linspace
-
-        # These are the bin *edges*
-        self._bins = func(lower_bound, upper_bound, n_bins + 1)
-
-        # These are the bin *left edges*.  These are the x-axis values
-        # we plot in the PlotCollection
-        self[bin_field] = self._bins
-
-        # If we are not being memory-conservative, grab all the bins
-        # and the inverse indices right now.
-
-    def _get_empty_field(self):
-        return np.zeros(self[self.bin_field].size, dtype='float64')
-
-    @preserve_source_parameters
-    def _bin_field(self, source, field, weight, accumulation,
-                   args, check_cut=False):
-        mi, inv_bin_indices = args # Args has the indices to use as input
-        # check_cut is set if source != self._data_source
-        source_data = self._get_field(source, field, check_cut)
-        if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = np.ones(source_data.shape, dtype='float64')
-        self.total_stuff = source_data.sum()
-        binned_field = self._get_empty_field()
-        weight_field = self._get_empty_field()
-        m_field = self._get_empty_field()
-        q_field = self._get_empty_field()
-        used_field = self._get_empty_field()
-        mi = args[0]
-        bin_indices_x = args[1].ravel().astype('int64')
-        source_data = source_data[mi]
-        weight_data = weight_data[mi]
-        bin_profile1d(bin_indices_x, weight_data, source_data,
-                      weight_field, binned_field,
-                      m_field, q_field, used_field)
-        # Fix for laziness, because at the *end* we will be
-        # summing up all of the histograms and dividing by the
-        # weights.  Accumulation likely doesn't work with weighted
-        # average fields.
-        if accumulation:
-            binned_field = np.add.accumulate(binned_field)
-        return binned_field, weight_field, q_field, \
-            used_field.astype("bool")
-
-    @preserve_source_parameters
-    def _get_bins(self, source, check_cut=False):
-        source_data = self._get_field(source, self.bin_field, check_cut)
-        if source_data.size == 0: # Nothing for us here.
-            raise YTEmptyProfileData()
-        # Truncate at boundaries.
-        if self.end_collect:
-            mi = np.ones_like(source_data).astype('bool')
-        else:
-            mi = ((source_data > self._bins.min())
-               &  (source_data < self._bins.max()))
-        sd = source_data[mi]
-        if sd.size == 0:
-            raise YTEmptyProfileData()
-        # Stick the bins into our fixed bins, set at initialization
-        bin_indices = np.digitize(sd, self._bins)
-        if self.end_collect: #limit the range of values to 0 and n_bins-1
-            bin_indices = np.clip(bin_indices, 0, self.n_bins - 1)
-        else: #throw away outside values
-            bin_indices -= 1
-
-        return (mi, bin_indices)
-
-    def choose_bins(self, bin_style):
-        # Depending on the bin_style, choose from bin edges 0...N either:
-        # both: 0...N, left: 0...N-1, right: 1...N
-        # center: N bins that are the average (both in linear or log
-        # space) of each pair of left/right edges
-        x = self.field_data[self.bin_field]
-        if bin_style is 'both': pass
-        elif bin_style is 'left': x = x[:-1]
-        elif bin_style is 'right': x = x[1:]
-        elif bin_style is 'center':
-            if self._x_log: x=np.log10(x)
-            x = 0.5*(x[:-1] + x[1:])
-            if self._x_log: x=10**x
-        else:
-            mylog.error('Did not recognize bin_style')
-            raise ValueError
-        return x
-
-    def write_out(self, filename, format="%0.16e", bin_style='left'):
-        '''
-        Write out data in ascii file, using *format* and
-        *bin_style* (left, right, center, both).
-        '''
-        fid = open(filename,"w")
-        fields = [field for field in sorted(self.field_data.keys()) if field != "UsedBins"]
-        fields.remove(self.bin_field)
-        fid.write("\t".join(["#"] + [self.bin_field] + fields + ["\n"]))
-
-        field_data = np.array(self.choose_bins(bin_style))
-        if bin_style is 'both':
-            field_data = np.append([field_data], np.array([self.field_data[field] for field in fields]), axis=0)
-        else:
-            field_data = np.append([field_data], np.array([self.field_data[field][:-1] for field in fields]), axis=0)
-
-        for line in range(field_data.shape[1]):
-            field_data[:,line].tofile(fid, sep="\t", format=format)
-            fid.write("\n")
-        fid.close()
-
-    def write_out_h5(self, filename, group_prefix=None, bin_style='left'):
-        """
-        Write out data in an hdf5 file *filename*.  Each profile is
-        put into a group, named by the axis fields.  Optionally a
-        *group_prefix* can be prepended to the group name.  If the
-        group already exists, it will be deleted and replaced.  However,
-        due to hdf5 functionality, it only unlinks the data, so an
-        h5repack may be necessary to conserve space.  Axes values are
-        saved in group attributes.  Bins will be saved based on
-        *bin_style* (left, right, center, both).
-        """
-        fid = h5py.File(filename)
-        fields = [field for field in sorted(self.field_data.keys()) if (field != "UsedBins" and field != self.bin_field)]
-        if group_prefix is None:
-            name = "%s-1d" % (self.bin_field)
-        else:
-            name = "%s-%s-1d" % (group_prefix, self.bin_field)
-
-        if name in fid:
-            mylog.info("Profile file is getting larger since you are attempting to overwrite a profile. You may want to repack")
-            del fid[name]
-        group = fid.create_group(name)
-        group.attrs["x-axis-%s" % self.bin_field] = self.choose_bins(bin_style)
-        for field in fields:
-            dset = group.create_dataset("%s" % field, data=self.field_data[field][:-1])
-        fid.close()
-
-    def _get_bin_fields(self):
-        return [self.bin_field]
-
-class BinnedProfile2D(BinnedProfile):
-    """
-    A 'Profile' produces either a weighted (or unweighted) average
-    or a straight sum of a field in a bin defined by two other
-    fields.  In the case of a weighted average, we have: p_i =
-    sum( w_i * v_i ) / sum(w_i)
-
-    We accept a *data_source*, which will be binned into
-    *x_n_bins* by the field *x_bin_field* between the
-    *x_lower_bound* and the *x_upper_bound* and then again binned
-    into *y_n_bins* by the field *y_bin_field* between the
-    *y_lower_bound* and the *y_upper_bound*.  These bins may or
-    may not be equally divided in log-space as specified by
-    *x_log* and *y_log*, and the *lazy_reader* flag controls
-    whether we use a memory conservative approach. If
-    *end_collect* is True, take all values outside the given
-    bounds and store them in the 0 and *n_bins*-1 values.
-    """
-    def __init__(self, data_source,
-                 x_n_bins, x_bin_field, x_lower_bound, x_upper_bound, x_log,
-                 y_n_bins, y_bin_field, y_lower_bound, y_upper_bound, y_log,
-                 end_collect=False):
-        BinnedProfile.__init__(self, data_source)
-        self.x_bin_field = x_bin_field
-        self.y_bin_field = y_bin_field
-        self._x_log = x_log
-        self._y_log = y_log
-        self.end_collect = end_collect
-        self.x_n_bins = x_n_bins
-        self.y_n_bins = y_n_bins
-
-        func = {True:np.logspace, False:np.linspace}[x_log]
-        bounds = fix_bounds(x_lower_bound, x_upper_bound, x_log)
-        self._x_bins = func(bounds[0], bounds[1], x_n_bins + 1)
-        self[x_bin_field] = self._x_bins
-
-        func = {True:np.logspace, False:np.linspace}[y_log]
-        bounds = fix_bounds(y_lower_bound, y_upper_bound, y_log)
-        self._y_bins = func(bounds[0], bounds[1], y_n_bins + 1)
-        self[y_bin_field] = self._y_bins
-
-        if np.any(np.isnan(self[x_bin_field])) \
-            or np.any(np.isnan(self[y_bin_field])):
-            mylog.error("Your min/max values for x, y have given me a nan.")
-            mylog.error("Usually this means you are asking for log, with a zero bound.")
-            raise ValueError
-
-    def _get_empty_field(self):
-        return np.zeros((self[self.x_bin_field].size,
-                         self[self.y_bin_field].size), dtype='float64')
-
-    @preserve_source_parameters
-    def _bin_field(self, source, field, weight, accumulation,
-                   args, check_cut=False):
-        source_data = self._get_field(source, field, check_cut)
-        if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = np.ones(source_data.shape, dtype='float64')
-        self.total_stuff = source_data.sum()
-        binned_field = self._get_empty_field()
-        weight_field = self._get_empty_field()
-        m_field = self._get_empty_field()
-        q_field = self._get_empty_field()
-        used_field = self._get_empty_field()
-        mi = args[0]
-        bin_indices_x = args[1].ravel().astype('int64')
-        bin_indices_y = args[2].ravel().astype('int64')
-        source_data = source_data[mi]
-        weight_data = weight_data[mi]
-        nx = bin_indices_x.size
-        #mylog.debug("Binning %s / %s times", source_data.size, nx)
-        bin_profile2d(bin_indices_x, bin_indices_y, weight_data, source_data,
-                      weight_field, binned_field, m_field, q_field, used_field)
-        if accumulation: # Fix for laziness
-            if not iterable(accumulation):
-                raise SyntaxError("Accumulation needs to have length 2")
-            if accumulation[0]:
-                binned_field = np.add.accumulate(binned_field, axis=0)
-            if accumulation[1]:
-                binned_field = np.add.accumulate(binned_field, axis=1)
-        return binned_field, weight_field, q_field, \
-            used_field.astype("bool")
-
-    @preserve_source_parameters
-    def _get_bins(self, source, check_cut=False):
-        source_data_x = self._get_field(source, self.x_bin_field, check_cut)
-        source_data_y = self._get_field(source, self.y_bin_field, check_cut)
-        if source_data_x.size == 0:
-            raise YTEmptyProfileData()
-
-        if self.end_collect:
-            mi = np.arange(source_data_x.size)
-        else:
-            mi = np.where( (source_data_x > self._x_bins.min())
-                           & (source_data_x < self._x_bins.max())
-                           & (source_data_y > self._y_bins.min())
-                           & (source_data_y < self._y_bins.max()))
-        sd_x = source_data_x[mi]
-        sd_y = source_data_y[mi]
-        if sd_x.size == 0 or sd_y.size == 0:
-            raise YTEmptyProfileData()
-
-        bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
-        bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
-        if self.end_collect:
-            bin_indices_x = np.minimum(np.maximum(1, bin_indices_x), self.x_n_bins) - 1
-            bin_indices_y = np.minimum(np.maximum(1, bin_indices_y), self.y_n_bins) - 1
-
-        # Now we set up our inverse bin indices
-        return (mi, bin_indices_x, bin_indices_y)
-
-    def choose_bins(self, bin_style):
-        # Depending on the bin_style, choose from bin edges 0...N either:
-        # both: 0...N, left: 0...N-1, right: 1...N
-        # center: N bins that are the average (both in linear or log
-        # space) of each pair of left/right edges
-
-        x = self.field_data[self.x_bin_field]
-        y = self.field_data[self.y_bin_field]
-        if bin_style is 'both':
-            pass
-        elif bin_style is 'left':
-            x = x[:-1]
-            y = y[:-1]
-        elif bin_style is 'right':
-            x = x[1:]
-            y = y[1:]
-        elif bin_style is 'center':
-            if self._x_log: x=np.log10(x)
-            if self._y_log: y=np.log10(y)
-            x = 0.5*(x[:-1] + x[1:])
-            y = 0.5*(y[:-1] + y[1:])
-            if self._x_log: x=10**x
-            if self._y_log: y=10**y
-        else:
-            mylog.error('Did not recognize bin_style')
-            raise ValueError
-
-        return x,y
-
-    def write_out(self, filename, format="%0.16e", bin_style='left'):
-        """
-        Write out the values of x,y,v in ascii to *filename* for every
-        field in the profile.  Optionally a *format* can be specified.
-        Bins will be saved based on *bin_style* (left, right, center,
-        both).
-        """
-        fid = open(filename,"w")
-        fields = [field for field in sorted(self.field_data.keys()) if field != "UsedBins"]
-        fid.write("\t".join(["#"] + [self.x_bin_field, self.y_bin_field]
-                          + fields + ["\n"]))
-        x,y = self.choose_bins(bin_style)
-        x,y = np.meshgrid(x,y)
-        field_data = [x.ravel(), y.ravel()]
-        if bin_style is not 'both':
-            field_data += [self.field_data[field][:-1,:-1].ravel() for field in fields
-                           if field not in [self.x_bin_field, self.y_bin_field]]
-        else:
-            field_data += [self.field_data[field].ravel() for field in fields
-                           if field not in [self.x_bin_field, self.y_bin_field]]
-
-        field_data = np.array(field_data)
-        for line in range(field_data.shape[1]):
-            field_data[:,line].tofile(fid, sep="\t", format=format)
-            fid.write("\n")
-        fid.close()
-
-    def write_out_h5(self, filename, group_prefix=None, bin_style='left'):
-        """
-        Write out data in an hdf5 file.  Each profile is put into a
-        group, named by the axis fields.  Optionally a group_prefix
-        can be prepended to the group name.  If the group already
-        exists, it will be deleted and replaced.  However, due to hdf5
-        functionality, it only unlinks the data, so an h5repack may be
-        necessary to conserve space.  Axes values are saved in group
-        attributes. Bins will be saved based on *bin_style* (left,
-        right, center, both).
-        """
-        fid = h5py.File(filename)
-        fields = [field for field in sorted(self.field_data.keys()) if (field != "UsedBins" and field != self.x_bin_field and field != self.y_bin_field)]
-        if group_prefix is None:
-            name = "%s-%s-2d" % (self.y_bin_field, self.x_bin_field)
-        else:
-            name = "%s-%s-%s-2d" % (group_prefix, self.y_bin_field, self.x_bin_field)
-        if name in fid:
-            mylog.info("Profile file is getting larger since you are attempting to overwrite a profile. You may want to repack")
-            del fid[name]
-        group = fid.create_group(name)
-
-        xbins, ybins = self.choose_bins(bin_style)
-        group.attrs["x-axis-%s" % self.x_bin_field] = xbins
-        group.attrs["y-axis-%s" % self.y_bin_field] = ybins
-        for field in fields:
-            dset = group.create_dataset("%s" % field, data=self.field_data[field][:-1,:-1])
-        fid.close()
-
-    def _get_bin_fields(self):
-        return [self.x_bin_field, self.y_bin_field]
-
-def fix_bounds(upper, lower, logit):
-    if logit:
-        if lower <= 0.0 or upper <= 0.0:
-            raise YTIllDefinedBounds(lower, upper)
-        return np.log10(upper), np.log10(lower)
-    return upper, lower
-
-class BinnedProfile3D(BinnedProfile):
-    """
-    A 'Profile' produces either a weighted (or unweighted) average
-    or a straight sum of a field in a bin defined by two other
-    fields.  In the case of a weighted average, we have: p_i =
-    sum( w_i * v_i ) / sum(w_i)
-
-    We accept a *data_source*, which will be binned into
-    *(x,y,z)_n_bins* by the field *(x,y,z)_bin_field* between the
-    *(x,y,z)_lower_bound* and the *(x,y,z)_upper_bound*.  These bins may or
-    may not be equally divided in log-space as specified by *(x,y,z)_log*.
-    If *end_collect* is True, take all values outside the given bounds and
-    store them in the 0 and *n_bins*-1 values.
-    """
-    def __init__(self, data_source,
-                 x_n_bins, x_bin_field, x_lower_bound, x_upper_bound, x_log,
-                 y_n_bins, y_bin_field, y_lower_bound, y_upper_bound, y_log,
-                 z_n_bins, z_bin_field, z_lower_bound, z_upper_bound, z_log,
-                 end_collect=False):
-        BinnedProfile.__init__(self, data_source)
-        self.x_bin_field = x_bin_field
-        self.y_bin_field = y_bin_field
-        self.z_bin_field = z_bin_field
-        self._x_log = x_log
-        self._y_log = y_log
-        self._z_log = z_log
-        self.end_collect = end_collect
-        self.x_n_bins = x_n_bins
-        self.y_n_bins = y_n_bins
-        self.z_n_bins = z_n_bins
-
-        func = {True:np.logspace, False:np.linspace}[x_log]
-        bounds = fix_bounds(x_lower_bound, x_upper_bound, x_log)
-        self._x_bins = func(bounds[0], bounds[1], x_n_bins + 1)
-        self[x_bin_field] = self._x_bins
-
-        func = {True:np.logspace, False:np.linspace}[y_log]
-        bounds = fix_bounds(y_lower_bound, y_upper_bound, y_log)
-        self._y_bins = func(bounds[0], bounds[1], y_n_bins + 1)
-        self[y_bin_field] = self._y_bins
-
-        func = {True:np.logspace, False:np.linspace}[z_log]
-        bounds = fix_bounds(z_lower_bound, z_upper_bound, z_log)
-        self._z_bins = func(bounds[0], bounds[1], z_n_bins + 1)
-        self[z_bin_field] = self._z_bins
-
-        if np.any(np.isnan(self[x_bin_field])) \
-            or np.any(np.isnan(self[y_bin_field])) \
-            or np.any(np.isnan(self[z_bin_field])):
-            mylog.error("Your min/max values for x, y or z have given me a nan.")
-            mylog.error("Usually this means you are asking for log, with a zero bound.")
-            raise ValueError
-
-    def _get_empty_field(self):
-        return np.zeros((self[self.x_bin_field].size,
-                         self[self.y_bin_field].size,
-                         self[self.z_bin_field].size), dtype='float64')
-
-    @preserve_source_parameters
-    def _bin_field(self, source, field, weight, accumulation,
-                   args, check_cut=False):
-        source_data = self._get_field(source, field, check_cut)
-        weight_data = np.ones(source_data.shape).astype('float64')
-        if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = np.ones(source_data.shape).astype('float64')
-        self.total_stuff = source_data.sum()
-        binned_field = self._get_empty_field()
-        weight_field = self._get_empty_field()
-        m_field = self._get_empty_field()
-        q_field = self._get_empty_field()
-        used_field = self._get_empty_field()
-        mi = args[0]
-        bin_indices_x = args[1].ravel().astype('int64')
-        bin_indices_y = args[2].ravel().astype('int64')
-        bin_indices_z = args[3].ravel().astype('int64')
-        source_data = source_data[mi]
-        weight_data = weight_data[mi]
-        bin_profile3d(bin_indices_x, bin_indices_y, bin_indices_z,
-                      weight_data, source_data, weight_field, binned_field,
-                      m_field, q_field, used_field)
-        if accumulation: # Fix for laziness
-            if not iterable(accumulation):
-                raise SyntaxError("Accumulation needs to have length 3")
-            if accumulation[0]:
-                binned_field = np.add.accumulate(binned_field, axis=0)
-            if accumulation[1]:
-                binned_field = np.add.accumulate(binned_field, axis=1)
-            if accumulation[2]:
-                binned_field = np.add.accumulate(binned_field, axis=2)
-        return binned_field, weight_field, q_field, \
-            used_field.astype("bool")
-
-    @preserve_source_parameters
-    def _get_bins(self, source, check_cut=False):
-        source_data_x = self._get_field(source, self.x_bin_field, check_cut)
-        source_data_y = self._get_field(source, self.y_bin_field, check_cut)
-        source_data_z = self._get_field(source, self.z_bin_field, check_cut)
-        if source_data_x.size == 0:
-            raise YTEmptyProfileData()
-        if self.end_collect:
-            mi = np.arange(source_data_x.size)
-        else:
-            mi = ( (source_data_x > self._x_bins.min())
-                 & (source_data_x < self._x_bins.max())
-                 & (source_data_y > self._y_bins.min())
-                 & (source_data_y < self._y_bins.max())
-                 & (source_data_z > self._z_bins.min())
-                 & (source_data_z < self._z_bins.max()))
-        sd_x = source_data_x[mi]
-        sd_y = source_data_y[mi]
-        sd_z = source_data_z[mi]
-        if sd_x.size == 0 or sd_y.size == 0 or sd_z.size == 0:
-            raise YTEmptyProfileData()
-
-        bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
-        bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
-        bin_indices_z = np.digitize(sd_z, self._z_bins) - 1
-        if self.end_collect:
-            bin_indices_x = np.minimum(np.maximum(1, bin_indices_x), self.x_n_bins) - 1
-            bin_indices_y = np.minimum(np.maximum(1, bin_indices_y), self.y_n_bins) - 1
-            bin_indices_z = np.minimum(np.maximum(1, bin_indices_z), self.z_n_bins) - 1
-
-        # Now we set up our inverse bin indices
-        return (mi, bin_indices_x, bin_indices_y, bin_indices_z)
-
-    def choose_bins(self, bin_style):
-        # Depending on the bin_style, choose from bin edges 0...N either:
-        # both: 0...N, left: 0...N-1, right: 1...N
-        # center: N bins that are the average (both in linear or log
-        # space) of each pair of left/right edges
-
-        x = self.field_data[self.x_bin_field]
-        y = self.field_data[self.y_bin_field]
-        z = self.field_data[self.z_bin_field]
-        if bin_style == 'both':
-            pass
-        elif bin_style == 'left':
-            x = x[:-1]
-            y = y[:-1]
-            z = z[:-1]
-        elif bin_style == 'right':
-            x = x[1:]
-            y = y[1:]
-            z = z[1:]
-        elif bin_style == 'center':
-            if self._x_log: x = np.log10(x)
-            if self._y_log: y = np.log10(y)
-            if self._z_log: z = np.log10(z)
-            x = 0.5*(x[:-1] + x[1:])
-            y = 0.5*(y[:-1] + y[1:])
-            z = 0.5*(z[:-1] + z[1:])
-            if self._x_log: x = 10**x
-            if self._y_log: y = 10**y
-            if self._z_log: z = 10**z
-        else:
-            mylog.error('Did not recognize bin_style')
-            raise ValueError
-
-        return x, y, z
-
-    def write_out(self, filename, format="%0.16e"):
-        pass # Will eventually dump HDF5
-
-    def write_out_h5(self, filename, group_prefix=None, bin_style='left'):
-        """
-        Write out data in an hdf5 file.  Each profile is put into a
-        group, named by the axis fields.  Optionally a group_prefix
-        can be prepended to the group name.  If the group already
-        exists, it will be deleted and replaced.  However, due to how
-        HDF5 deletion works, this only unlinks the data, so an h5repack
-        may be necessary to reclaim the space.  Axes values are saved
-        in group attributes.
-        """
-        fid = h5py.File(filename)
-        fields = [field for field in sorted(self.field_data.keys())
-                  if field not in ("UsedBins", self.x_bin_field,
-                                   self.y_bin_field, self.z_bin_field)]
-        if group_prefix is None:
-            name = "%s-%s-%s-3d" % (self.z_bin_field, self.y_bin_field,
-                                    self.x_bin_field)
-        else:
-            name = "%s-%s-%s-%s-3d" % (group_prefix, self.z_bin_field,
-                                       self.y_bin_field, self.x_bin_field)
-
-        if name in fid:
-            mylog.info("Overwriting existing profile group; the file will "
-                       "grow since HDF5 only unlinks old data. Consider "
-                       "running h5repack.")
-            del fid[name]
-        group = fid.create_group(name)
-
-        xbins, ybins, zbins = self.choose_bins(bin_style)
-        group.attrs["x-axis-%s" % self.x_bin_field] = xbins
-        group.attrs["y-axis-%s" % self.y_bin_field] = ybins
-        group.attrs["z-axis-%s" % self.z_bin_field] = zbins
-
-        for field in fields:
-            group.create_dataset("%s" % field,
-                                 data=self.field_data[field][:-1, :-1, :-1])
-        fid.close()
-
-
-    def _get_bin_fields(self):
-        return [self.x_bin_field, self.y_bin_field, self.z_bin_field]
-
-    def store_profile(self, name, force=False):
-        """
-        By identifying the profile with a fixed, user-input *name* we can
-        store it in the serialized data section of the index file.  *force*
-        governs whether or not an existing profile with that name will be
-        overwritten.
-        """
-        # First we get our data in order
-        order = []
-        set_attr = {'x_bin_field':self.x_bin_field,
-                    'y_bin_field':self.y_bin_field,
-                    'z_bin_field':self.z_bin_field,
-                    'x_bin_values':self[self.x_bin_field],
-                    'y_bin_values':self[self.y_bin_field],
-                    'z_bin_values':self[self.z_bin_field],
-                    '_x_log':self._x_log,
-                    '_y_log':self._y_log,
-                    '_z_log':self._z_log,
-                    'shape': (self[self.x_bin_field].size,
-                              self[self.y_bin_field].size,
-                              self[self.z_bin_field].size),
-                    'field_order':order }
-        values = []
-        for field in self.field_data:
-            if field in set_attr.values(): continue
-            order.append(field)
-            values.append(self[field].ravel())
-        values = np.array(values).transpose()
-        self._data_source.index.save_data(values, "/Profiles", name,
-                                              set_attr, force=force)
 
 class ProfileFieldAccumulator(object):
     def __init__(self, n_fields, size):
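
For readers skimming the diff: the deleted _get_bins/_bin_field pair
digitized each sample field onto fixed bin edges, summed values per bin,
and optionally accumulated cumulatively along each axis. A minimal
NumPy-only sketch of that pattern, with illustrative stand-in arrays
(this is not yt API):

    import numpy as np

    rng = np.random.RandomState(0)
    samples = rng.rand(1000)         # stand-in for the binning field
    values = rng.rand(1000)          # stand-in for the field being profiled
    bins = np.linspace(0.0, 1.0, 9)  # 8 bins -> 9 edges

    # Same convention as the removed code: np.digitize returns 1-based
    # positions, so subtract 1 to get 0-based bin indices.
    indices = np.digitize(samples, bins) - 1
    mask = (indices >= 0) & (indices < len(bins) - 1)

    # Sum the values that land in each bin.
    binned = np.zeros(len(bins) - 1)
    np.add.at(binned, indices[mask], values[mask])

    # accumulation=True in the removed API corresponded to:
    cumulative = np.add.accumulate(binned)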

diff -r a05e6a7165714d6b4e4e049dc1310c77a7adc30c -r 84f2a7849d3d0c74ad581109954fce6230d820d9 yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -1,78 +1,13 @@
 from yt.testing import *
 from yt.data_objects.profiles import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
-    Profile1D, Profile2D, Profile3D, create_profile
+    Profile1D, \
+    Profile2D, \
+    Profile3D, \
+    create_profile
 
 _fields = ("density", "temperature", "dinosaurs", "tribbles")
 _units = ("g/cm**3", "K", "dyne", "erg")
 
-def test_binned_profiles():
-    return
-    ds = fake_random_ds(64, nprocs = 8, fields = _fields, units = _units)
-    nv = ds.domain_dimensions.prod()
-    dd = ds.all_data()
-    (rmi, rma), (tmi, tma), (dmi, dma) = dd.quantities["Extrema"](
-        ["density", "temperature", "dinosaurs"])
-    rt, tt, dt = dd.quantities["TotalQuantity"](
-        ["density", "temperature", "dinosaurs"])
-    # First we look at the 
-    for nb in [8, 16, 32, 64]:
-        # We log all the fields or don't log 'em all.  No need to do them
-        # individually.
-        for lf in [True, False]: 
-            # We have the min and the max, but to avoid cutting them off
-            # since we aren't doing end-collect, we cut a bit off the edges
-            for ec, e1, e2 in [(False, 0.9, 1.1), (True, 1.0, 1.0)]:
-                p1d = BinnedProfile1D(dd, 
-                    nb, "density", rmi*e1, rma*e2, lf,
-                    end_collect=ec)
-                p1d.add_fields(["ones", "temperature"], weight=None)
-                yield assert_equal, p1d["ones"].sum(), nv
-                yield assert_rel_equal, tt, p1d["temperature"].sum(), 7
-
-                p2d = BinnedProfile2D(dd, 
-                    nb, "density", rmi*e1, rma*e2, lf,
-                    nb, "temperature", tmi*e1, tma*e2, lf,
-                    end_collect=ec)
-                p2d.add_fields(["ones", "temperature"], weight=None)
-                yield assert_equal, p2d["ones"].sum(), nv
-                yield assert_rel_equal, tt, p2d["temperature"].sum(), 7
-
-                p3d = BinnedProfile3D(dd, 
-                    nb, "density", rmi*e1, rma*e2, lf,
-                    nb, "temperature", tmi*e1, tma*e2, lf,
-                    nb, "dinosaurs", dmi*e1, dma*e2, lf,
-                    end_collect=ec)
-                p3d.add_fields(["ones", "temperature"], weight=None)
-                yield assert_equal, p3d["ones"].sum(), nv
-                yield assert_rel_equal, tt, p3d["temperature"].sum(), 7
-
-        p1d = BinnedProfile1D(dd, nb, "x", 0.0, 1.0, log_space=False)
-        p1d.add_fields("ones", weight=None)
-        av = nv / nb
-        yield assert_equal, p1d["ones"][:-1], np.ones(nb)*av
-        # We re-bin ones with a weight now
-        p1d.add_fields(["ones"], weight="temperature")
-        yield assert_equal, p1d["ones"][:-1], np.ones(nb)
-
-        p2d = BinnedProfile2D(dd, nb, "x", 0.0, 1.0, False,
-                                  nb, "y", 0.0, 1.0, False)
-        p2d.add_fields("ones", weight=None)
-        av = nv / nb**2
-        yield assert_equal, p2d["ones"][:-1,:-1], np.ones((nb, nb))*av
-        # We re-bin ones with a weight now
-        p2d.add_fields(["ones"], weight="temperature")
-        yield assert_equal, p2d["ones"][:-1,:-1], np.ones((nb, nb))
-
-        p3d = BinnedProfile3D(dd, nb, "x", 0.0, 1.0, False,
-                                  nb, "y", 0.0, 1.0, False,
-                                  nb, "z", 0.0, 1.0, False)
-        p3d.add_fields("ones", weight=None)
-        av = nv / nb**3
-        yield assert_equal, p3d["ones"][:-1,:-1,:-1], np.ones((nb, nb, nb))*av
-        # We re-bin ones with a weight now
-        p3d.add_fields(["ones"], weight="temperature")
-        yield assert_equal, p3d["ones"][:-1,:-1,:-1], np.ones((nb,nb,nb))
 
 def test_profiles():
     ds = fake_random_ds(64, nprocs = 8, fields = _fields, units = _units)
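
As context for the deletion above: what the BinnedProfile tests exercised
is covered by the retained create_profile interface. A hedged sketch of
the equivalent call (keyword defaults may differ between yt versions):

    from yt.data_objects.profiles import create_profile
    from yt.testing import fake_random_ds

    ds = fake_random_ds(64)
    ad = ds.all_data()
    # Bin "temperature" against "density"; weight_field=None gives plain
    # sums, mirroring the old add_fields(..., weight=None) calls.
    profile = create_profile(ad, ["density"], ["temperature"],
                             n_bins=32, weight_field=None)
    print(profile["temperature"])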


https://bitbucket.org/yt_analysis/yt/commits/5ef6cc967fe8/
Changeset:   5ef6cc967fe8
Branch:      yt
User:        ngoldbaum
Date:        2015-11-04 22:02:18+00:00
Summary:     Linting yt.data_objects
Affected #:  30 files

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/analyzer_objects.py
--- a/yt/data_objects/analyzer_objects.py
+++ b/yt/data_objects/analyzer_objects.py
@@ -15,7 +15,6 @@
 
 import inspect
 
-from yt.funcs import *
 from yt.extern.six import add_metaclass
 
 analysis_task_registry = {}
@@ -23,7 +22,7 @@
 class RegisteredTask(type):
     def __init__(cls, name, b, d):
         type.__init__(cls, name, b, d)
-        if hasattr(cls, "skip") and cls.skip == False:
+        if hasattr(cls, "skip") and cls.skip is False:
             return
         analysis_task_registry[cls.__name__] = cls
 

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -15,21 +15,29 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-import math
-import weakref
-import itertools
-import shelve
 from functools import wraps
 import fileinput
 from re import finditer
+from tempfile import TemporaryFile
 import os
+import zipfile
 
 from yt.config import ytcfg
-from yt.funcs import *
-from yt.utilities.logger import ytLogger
-from .data_containers import \
-    YTSelectionContainer1D, YTSelectionContainer2D, YTSelectionContainer3D, \
-    restore_field_information_state, YTFieldData
+from yt.data_objects.data_containers import \
+    YTSelectionContainer1D, \
+    YTSelectionContainer2D, \
+    YTSelectionContainer3D, \
+    YTFieldData
+from yt.funcs import \
+    ensure_list, \
+    mylog, \
+    get_memory_usage, \
+    iterable, \
+    only_on_root
+from yt.utilities.exceptions import \
+    YTParticleDepositionNotImplemented, \
+    YTNoAPIKey, \
+    YTTooManyVertices
 from yt.utilities.lib.QuadTree import \
     QuadTree
 from yt.utilities.lib.Interpolators import \
@@ -38,8 +46,6 @@
     fill_region
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
-from yt.utilities.data_point_utilities import CombineGrids,\
-    DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
 from yt.utilities.minimal_representation import \
     MinimalProjectionData
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -47,16 +53,10 @@
 from yt.units.unit_object import Unit
 import yt.geometry.particle_deposit as particle_deposit
 from yt.utilities.grid_data_format.writer import write_to_gdf
+from yt.fields.field_exceptions import \
+    NeedsOriginalGrid
 from yt.frontends.stream.api import load_uniform_grid
 
-from yt.fields.field_exceptions import \
-    NeedsGridType,\
-    NeedsOriginalGrid,\
-    NeedsDataField,\
-    NeedsProperty,\
-    NeedsParameter
-from yt.fields.derived_field import \
-    TranslationFunc
 
 class YTStreamline(YTSelectionContainer1D):
     """
@@ -369,10 +369,10 @@
         data['pdy'] = self.ds.arr(pdy, code_length)
         data['fields'] = nvals
         # Now we run the finalizer, which is ignored if we don't need it
-        fd = data['fields']
+        data['fields']
         field_data = np.hsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
-            finfo = self.ds._get_field_info(*field)
+            self.ds._get_field_info(*field)
             mylog.debug("Setting field %s", field)
             input_units = self._projected_units[field]
             self[field] = self.ds.arr(field_data[fi].ravel(), input_units)
@@ -939,7 +939,6 @@
         ls.current_level += 1
         ls.current_dx = ls.base_dx / \
             self.ds.relative_refinement(0, ls.current_level)
-        LL = ls.left_edge - ls.domain_left_edge
         ls.old_global_startindex = ls.global_startindex
         ls.global_startindex, end_index, ls.current_dims = \
             self._minimal_box(ls.current_dx)
@@ -1509,11 +1508,8 @@
                     color_log = True, emit_log = True, plot_index = None,
                     color_field_max = None, color_field_min = None,
                     emit_field_max = None, emit_field_min = None):
-        import io
-        from sys import version
         if plot_index is None:
             plot_index = 0
-            vmax=0
         ftype = [("cind", "uint8"), ("emit", "float")]
         vtype = [("x","float"),("y","float"), ("z","float")]
         #(0) formulate vertices
@@ -1552,7 +1548,7 @@
                 tmp = self.vertices[i,:]
                 np.divide(tmp, dist_fac, tmp)
                 v[ax][:] = tmp
-        return  v, lut, transparency, emiss, f['cind']
+        return v, lut, transparency, emiss, f['cind']
 
 
     def export_ply(self, filename, bounds = None, color_field = None,
@@ -1734,8 +1730,6 @@
         api_key = api_key or ytcfg.get("yt","sketchfab_api_key")
         if api_key in (None, "None"):
             raise YTNoAPIKey("SketchFab.com", "sketchfab_api_key")
-        import zipfile, json
-        from tempfile import TemporaryFile
 
         ply_file = TemporaryFile()
         self.export_ply(ply_file, bounds, color_field, color_map, color_log,

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -13,32 +13,39 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
 import itertools
-import os
-import types
 import uuid
-from yt.extern.six import string_types
-
-data_object_registry = {}
 
 import numpy as np
 import weakref
 import shelve
+
+from collections import defaultdict
 from contextlib import contextmanager
 
-from yt.funcs import get_output_filename
-from yt.funcs import *
-
 from yt.data_objects.particle_io import particle_handler_registry
 from yt.frontends.ytdata.utilities import \
     save_as_dataset
+from yt.funcs import \
+    get_output_filename, \
+    mylog, \
+    ensure_list, \
+    fix_axis, \
+    iterable
 from yt.units.unit_object import UnitParseError
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
 from yt.utilities.exceptions import \
     YTUnitConversionError, \
     YTFieldUnitError, \
     YTFieldUnitParseError, \
-    YTSpatialFieldUnitError
+    YTSpatialFieldUnitError, \
+    YTCouldNotGenerateField, \
+    YTFieldNotParseable, \
+    YTFieldNotFound, \
+    YTFieldTypeNotFound, \
+    YTDataSelectorNotImplemented
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -55,9 +62,10 @@
     compose_selector
 from yt.extern.six import add_metaclass, string_types
 
+data_object_registry = {}
+
 def force_array(item, shape):
     try:
-        sh = item.shape
         return item.copy()
     except AttributeError:
         if item:
@@ -831,7 +839,7 @@
             fields_to_get.append(field)
         if len(fields_to_get) == 0 and len(fields_to_generate) == 0:
             return
-        elif self._locked == True:
+        elif self._locked is True:
             raise GenerationInProgress(fields)
         # Track which ones we want in the end
         ofields = set(list(self.field_data.keys())
@@ -1400,7 +1408,7 @@
         with child cells are left untouched.
         """
         for grid in self._grids:
-            if default_value != None:
+            if default_value is not None:
                 grid[field] = np.ones(grid.ActiveDimensions)*default_value
             grid[field][self._get_point_indices(grid)] = value
 

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -17,18 +17,15 @@
 
 import numpy as np
 
-from yt.funcs import *
-
-from yt.config import ytcfg
-from yt.units.yt_array import YTArray, uconcatenate, array_like_field
-from yt.utilities.exceptions import YTFieldNotFound
+from yt.funcs import \
+    camelcase_to_underscore, \
+    ensure_list
+from yt.units.yt_array import array_like_field
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_objects
-from yt.utilities.lib.Octree import Octree
 from yt.utilities.physical_constants import \
     gravitational_constant_cgs, \
     HUGE
-from yt.utilities.math_utils import prec_accum
 from yt.extern.six import add_metaclass
 
 derived_quantity_registry = {}
@@ -202,7 +199,6 @@
     def __call__(self):
         self.data_source.ds.index
         fi = self.data_source.ds.field_info
-        fields = []
         if ("gas", "cell_mass") in fi:
             gas = super(TotalMass, self).__call__([('gas', 'cell_mass')])
         else:

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -13,25 +13,17 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import pdb
 import weakref
-import itertools
 import numpy as np
 
-from yt.funcs import *
-
 from yt.data_objects.data_containers import \
     YTFieldData, \
-    YTDataContainer, \
     YTSelectionContainer
-from yt.fields.field_exceptions import \
-    NeedsGridType, \
-    NeedsOriginalGrid, \
-    NeedsDataField, \
-    NeedsProperty, \
-    NeedsParameter
 from yt.geometry.selection_routines import convert_mask_to_indices
 import yt.geometry.particle_deposit as particle_deposit
+from yt.utilities.exceptions import \
+    YTFieldTypeNotFound, \
+    YTParticleDepositionNotImplemented
 from yt.utilities.lib.Interpolators import \
     ghost_zone_interpolate
 
@@ -234,15 +226,12 @@
         # We will attempt this by creating a datacube that is exactly bigger
         # than the grid by nZones*dx in each direction
         nl = self.get_global_startindex() - n_zones
-        nr = nl + self.ActiveDimensions + 2 * n_zones
         new_left_edge = nl * self.dds + self.ds.domain_left_edge
-        new_right_edge = nr * self.dds + self.ds.domain_left_edge
 
         # Something different needs to be done for the root grid, though
         level = self.Level
         if all_levels:
             level = self.index.max_level + 1
-        args = (level, new_left_edge, new_right_edge)
         kwargs = {'dims': self.ActiveDimensions + 2*n_zones,
                   'num_ghost_zones':n_zones,
                   'use_pbar':False, 'fields':fields}

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -18,23 +18,20 @@
 
 from yt.data_objects.data_containers import \
     YTFieldData, \
-    YTDataContainer, \
     YTSelectionContainer
-from yt.fields.field_exceptions import \
-    NeedsGridType, \
-    NeedsOriginalGrid, \
-    NeedsDataField, \
-    NeedsProperty, \
-    NeedsParameter
 import yt.geometry.particle_deposit as particle_deposit
 import yt.geometry.particle_smooth as particle_smooth
-from yt.funcs import *
+
+from yt.funcs import mylog
 from yt.utilities.lib.geometry_utils import compute_morton
 from yt.geometry.particle_oct_container import \
     ParticleOctreeContainer
 from yt.units.yt_array import YTArray
 from yt.units.dimensions import length
-from yt.utilities.exceptions import YTInvalidPositionArray
+from yt.utilities.exceptions import \
+    YTInvalidPositionArray, \
+    YTFieldTypeNotFound, \
+    YTParticleDepositionNotImplemented
 
 def cell_count_cache(func):
     def cc_cache_func(self, dobj):

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -14,16 +14,14 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
 import copy
+from collections import defaultdict
 
 from contextlib import contextmanager
-from functools import wraps
 
 from yt.fields.field_info_container import \
     NullFunc, TranslationFunc
 from yt.utilities.exceptions import YTIllDefinedFilter
-from yt.funcs import *
 
 # One to many mapping
 filter_registry = defaultdict(list)

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -15,7 +15,11 @@
 
 import numpy as np
 
-from yt.funcs import *
+from collections import defaultdict
+
+from yt.funcs import \
+    ensure_list, \
+    mylog
 from yt.extern.six import add_metaclass
 
 particle_handler_registry = defaultdict()

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -13,14 +13,15 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 from yt.frontends.ytdata.utilities import \
     save_as_dataset
-from yt.funcs import get_output_filename
-from yt.funcs import *
-from yt.units.yt_array import uconcatenate, array_like_field
+from yt.funcs import \
+    get_output_filename, \
+    ensure_list, \
+    iterable
+from yt.units.yt_array import array_like_field
 from yt.units.unit_object import Unit
 from yt.data_objects.data_containers import YTFieldData
 from yt.utilities.lib.misc_utilities import \
@@ -29,7 +30,6 @@
     new_bin_profile3d
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_objects
-from yt.utilities.exceptions import YTEmptyProfileData
 from yt.utilities.lib.CICDeposit import \
     CICDeposit_2, \
     NGPDeposit_2

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -14,27 +14,27 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import types
 import numpy as np
-from contextlib import contextmanager
 
-from yt.funcs import *
-from yt.utilities.lib.alt_ray_tracers import cylindrical_ray_trace
-from yt.utilities.orientation import Orientation
-from .data_containers import \
+from yt.data_objects.data_containers import \
     YTSelectionContainer0D, YTSelectionContainer1D, \
     YTSelectionContainer2D, YTSelectionContainer3D
-from yt.data_objects.derived_quantities import \
-    DerivedQuantityCollection
+from yt.funcs import \
+    ensure_list, \
+    iterable, \
+    validate_width_tuple, \
+    fix_length
+from yt.units.yt_array import \
+    YTArray
 from yt.utilities.exceptions import \
     YTSphereTooSmall, \
     YTIllDefinedCutRegion, \
-    YTMixedCutRegion
-from yt.utilities.linear_interpolators import TrilinearFieldInterpolator
+    YTMixedCutRegion, \
+    YTEllipsoidOrdering
 from yt.utilities.minimal_representation import \
     MinimalSliceData
 from yt.utilities.math_utils import get_rotation_matrix
-from yt.units.yt_array import YTQuantity
+from yt.utilities.orientation import Orientation
 
 
 class YTPoint(YTSelectionContainer0D):
@@ -364,7 +364,6 @@
         self._x_vec = self.orienter.unit_vectors[0]
         self._y_vec = self.orienter.unit_vectors[1]
         # First we try all three, see which has the best result:
-        vecs = np.identity(3)
         self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
         self._inv_mat = np.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ b/yt/data_objects/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -14,15 +14,26 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import string, re, gc, time, os, os.path, weakref
 import functools
+import numpy as np
+import os
+import time
+import weakref
 
-from yt.funcs import *
+from collections import defaultdict
 from yt.extern.six import add_metaclass
 
 from yt.config import ytcfg
+from yt.funcs import \
+    mylog, \
+    set_intersection, \
+    ensure_list
 from yt.utilities.cosmology import \
-     Cosmology
+    Cosmology
+from yt.utilities.exceptions import \
+    YTObjectNotImplemented, \
+    YTFieldNotFound, \
+    YTGeometryNotSupported
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
 from yt.utilities.parameter_file_storage import \
@@ -33,8 +44,6 @@
 from yt.units.unit_registry import UnitRegistry
 from yt.fields.derived_field import \
     ValidateSpatial
-from yt.fields.field_info_container import \
-    FieldInfoContainer, NullFunc
 from yt.fields.fluid_fields import \
     setup_gradient_fields
 from yt.fields.particle_fields import \
@@ -129,7 +138,6 @@
     _instantiated = False
 
     def __new__(cls, filename=None, *args, **kwargs):
-        from yt.frontends.stream.data_structures import StreamHandler
         if not isinstance(filename, str):
             obj = object.__new__(cls)
             # The Stream frontend uses a StreamHandler object to pass metadata
@@ -140,7 +148,6 @@
                 obj.__init__(filename, *args, **kwargs)
             return obj
         apath = os.path.abspath(filename)
-        #if not os.path.exists(apath): raise IOError(filename)
         if ytcfg.getboolean("yt","skip_dataset_cache"):
             obj = object.__new__(cls)
         elif apath not in _cached_datasets:
@@ -448,7 +455,7 @@
         # Give ourselves a chance to add them here, first, then...
         # ...if we can't find them, we set them up as defaults.
         new_fields = self._setup_particle_types([union.name])
-        rv = self.field_info.find_dependencies(new_fields)
+        self.field_info.find_dependencies(new_fields)
 
     def add_particle_filter(self, filter):
         # This requires an index

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/tests/test_chunking.py
--- a/yt/data_objects/tests/test_chunking.py
+++ b/yt/data_objects/tests/test_chunking.py
@@ -1,4 +1,8 @@
-from yt.testing import *
+from yt.testing import \
+    fake_random_ds, \
+    assert_equal
+from yt.units.yt_array import \
+    uconcatenate
 
 def _get_dobjs(c):
     dobjs = [("sphere", ("center", (1.0, "unitary"))),

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/tests/test_compose.py
--- a/yt/data_objects/tests/test_compose.py
+++ b/yt/data_objects/tests/test_compose.py
@@ -1,6 +1,11 @@
-from yt.testing import *
-from yt.fields.local_fields import add_field
-from yt.units.yt_array import YTArray, uintersect1d
+import numpy as np
+
+from yt.testing import \
+    fake_random_ds, \
+    assert_array_equal
+from yt.units.yt_array import \
+    YTArray, \
+    uintersect1d
 
 def setup():
     from yt.config import ytcfg
@@ -72,9 +77,8 @@
 
 def test_compose_overlap():
     r"""Test to make sure that composed data objects that do
-    overlap behave the way we expect 
+    overlap behave the way we expect
     """
-    empty = np.array([])
     for n in [1, 2, 4, 8]:
         ds = fake_random_ds(64, nprocs=n)
         ds.add_field(("index", "ID"), function=_IDFIELD)

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -1,7 +1,9 @@
-from yt.testing import *
-from yt.data_objects.profiles import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
-from yt.frontends.stream.api import load_particles
+import numpy as np
+
+from yt.frontends.stream.data_structures import load_particles
+from yt.testing import \
+    fake_random_ds, \
+    assert_equal
 
 def setup():
     from yt.config import ytcfg

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/tests/test_data_collection.py
--- a/yt/data_objects/tests/test_data_collection.py
+++ b/yt/data_objects/tests/test_data_collection.py
@@ -1,4 +1,9 @@
-from yt.testing import *
+import numpy as np
+
+from yt.testing import \
+    fake_random_ds, \
+    assert_equal, \
+    assert_rel_equal
 
 def setup():
     from yt.config import ytcfg

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/tests/test_derived_quantities.py
--- a/yt/data_objects/tests/test_derived_quantities.py
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -1,6 +1,10 @@
-from yt.testing import *
 import numpy as np
 
+from yt.testing import \
+    fake_random_ds, \
+    assert_equal, \
+    assert_rel_equal
+
 def setup():
     from yt.config import ytcfg
     ytcfg["yt","__withintesting"] = "True"

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/tests/test_ellipsoid.py
--- a/yt/data_objects/tests/test_ellipsoid.py
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -1,4 +1,8 @@
-from yt.testing import *
+import numpy as np
+
+from yt.testing import \
+    fake_random_ds, \
+    assert_array_less
 
 def setup():
     from yt.config import ytcfg

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/tests/test_extract_regions.py
--- a/yt/data_objects/tests/test_extract_regions.py
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -1,4 +1,10 @@
-from yt.testing import *
+import numpy as np
+
+from yt.testing import \
+    fake_random_ds, \
+    assert_equal, \
+    assert_almost_equal
+
 
 def setup():
     from yt.config import ytcfg

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/tests/test_fluxes.py
--- a/yt/data_objects/tests/test_fluxes.py
+++ b/yt/data_objects/tests/test_fluxes.py
@@ -1,4 +1,7 @@
-from yt.testing import *
+from yt.testing import \
+    fake_random_ds, \
+    assert_almost_equal, \
+    assert_equal
 
 def setup():
     from yt.config import ytcfg

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/tests/test_ortho_rays.py
--- a/yt/data_objects/tests/test_ortho_rays.py
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -1,4 +1,9 @@
-from yt.testing import *
+import numpy as np
+
+from yt.testing import \
+    fake_random_ds, \
+    assert_equal
+
 
 def test_ortho_ray():
     ds = fake_random_ds(64, nprocs=8)

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/tests/test_points.py
--- a/yt/data_objects/tests/test_points.py
+++ b/yt/data_objects/tests/test_points.py
@@ -1,10 +1,10 @@
-from yt.testing import *
-import numpy as np
+from yt.testing import fake_random_ds
 
 def setup():
     from yt.config import ytcfg
     ytcfg["yt","__withintesting"] = "True"
 
 def test_domain_point():
-    ds = fake_random_ds(16, fields = ("density"))
+    ds = fake_random_ds(16)
     p = ds.point(ds.domain_center)
+    p['density']

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -1,9 +1,14 @@
-from yt.testing import *
+import numpy as np
+
 from yt.data_objects.profiles import \
     Profile1D, \
     Profile2D, \
     Profile3D, \
     create_profile
+from yt.testing import \
+    fake_random_ds, \
+    assert_equal, \
+    assert_rel_equal
 
 _fields = ("density", "temperature", "dinosaurs", "tribbles")
 _units = ("g/cm**3", "K", "dyne", "erg")

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -36,7 +36,6 @@
         xf, yf, zf = ds.domain_right_edge.to_ndarray() - \
             1.0 / (ds.domain_dimensions * 2)
         dd = ds.all_data()
-        rho_tot = dd.quantities["TotalQuantity"]("density")
         coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
         uc = [np.unique(c) for c in coords]
         # test if projections inherit the field parameters of their data sources

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/tests/test_rays.py
--- a/yt/data_objects/tests/test_rays.py
+++ b/yt/data_objects/tests/test_rays.py
@@ -1,4 +1,11 @@
-from yt.testing import *
+import numpy as np
+
+from yt.testing import \
+    fake_random_ds, \
+    assert_equal, \
+    assert_rel_equal
+from yt.units.yt_array import \
+    uconcatenate
 
 def test_ray():
     for nproc in [1, 2, 4, 8]:
@@ -36,7 +43,6 @@
             tin = tin.max(axis=0)
             tout = tout.min(axis=0)
             my_cells = (tin < tout) & (tin < 1) & (tout > 0)
-            dts = np.clip(tout[my_cells], 0.0, 1.0) - np.clip(tin[my_cells], 0.0, 1.0)
 
             yield assert_equal, ray_cells.sum(), my_cells.sum()
             yield assert_rel_equal, my_ray['density'][ray_cells].sum(), \

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/tests/test_spheres.py
--- a/yt/data_objects/tests/test_spheres.py
+++ b/yt/data_objects/tests/test_spheres.py
@@ -1,6 +1,10 @@
+import numpy as np
+
 from yt.data_objects.profiles import create_profile
-from yt.testing import *
-import numpy as np
+from yt.testing import \
+    fake_random_ds, \
+    assert_equal, \
+    periodicity_cases
 
 def setup():
     from yt.config import ytcfg

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/tests/test_streamlines.py
--- a/yt/data_objects/tests/test_streamlines.py
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -1,5 +1,10 @@
-from yt.testing import *
-from yt.visualization.api import Streamlines
+import numpy as np
+
+from yt.testing import \
+    fake_random_ds, \
+    assert_rel_equal, \
+    assert_equal
+from yt.visualization.streamlines import Streamlines
 
 def setup():
     from yt.config import ytcfg

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -13,17 +13,33 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import inspect, functools, weakref, glob, types, os
+import inspect
+import functools
+import glob
+import numpy as np
+import os
+import weakref
 
-from yt.funcs import *
+from functools import wraps
+
 from yt.extern.six import add_metaclass, string_types
 from yt.convenience import load
 from yt.config import ytcfg
-from .data_containers import data_object_registry
-from .analyzer_objects import create_quantity_proxy, \
-    analysis_task_registry, AnalysisTask
+from yt.data_objects.data_containers import data_object_registry
+from yt.data_objects.derived_quantities import \
+    derived_quantity_registry
+from yt.data_objects.analyzer_objects import \
+    create_quantity_proxy, \
+    analysis_task_registry, \
+    AnalysisTask
+from yt.funcs import \
+    iterable, \
+    ensure_list, \
+    mylog
 from yt.units.yt_array import YTArray, YTQuantity
-from yt.utilities.exceptions import YTException
+from yt.utilities.exceptions import \
+    YTException, \
+    YTOutputNotIdentified
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import parallel_objects, parallel_root_only
 from yt.utilities.parameter_file_storage import \
@@ -285,7 +301,7 @@
                     if style == 'ds':
                         arg = ds
                     elif style == 'data_object':
-                        if obj == None:
+                        if obj is None:
                             obj = DatasetSeriesObject(self, "all_data")
                         arg = obj.get(ds)
                     rv = task.eval(arg)
@@ -377,7 +393,7 @@
         if key not in self.quantities: raise KeyError(key)
         q = self.quantities[key]
         def run_quantity_wrapper(quantity, quantity_name):
-            @wraps(quantity_info[quantity_name][1])
+            @wraps(derived_quantity_registry[quantity_name][1])
             def run_quantity(*args, **kwargs):
                 to_run = quantity(*args, **kwargs)
                 return self.data_object.eval(to_run)
@@ -390,7 +406,7 @@
         self.data_object_name = data_object_name
         self._args = args
         self._kwargs = kwargs
-        qs = dict([(qn, create_quantity_proxy(qv)) for qn, qv in quantity_info.items()])
+        qs = dict([(qn, create_quantity_proxy(qv)) for qn, qv in derived_quantity_registry.items()])
         self.quantities = TimeSeriesQuantitiesContainer(self, qs)
 
     def eval(self, tasks):
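
Note that this hunk appears to be more than linting: after the import
cleanup, `quantity_info` is not defined in this module, so these paths
would raise NameError; `derived_quantity_registry` is the registry that
is actually populated. An illustrative sketch (not the yt implementation)
of that metaclass-registry pattern:

    from yt.extern.six import add_metaclass

    derived_quantity_registry = {}

    class RegisteredDerivedQuantity(type):
        def __init__(cls, name, bases, d):
            type.__init__(cls, name, bases, d)
            # record every concrete subclass by name
            if name != "DerivedQuantity":
                derived_quantity_registry[name] = cls

    @add_metaclass(RegisteredDerivedQuantity)
    class DerivedQuantity(object):
        pass

    class TotalMass(DerivedQuantity):
        pass

    print("TotalMass" in derived_quantity_registry)  # True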

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -13,7 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import weakref
 import numpy as np
 
 from yt.funcs import mylog

diff -r 84f2a7849d3d0c74ad581109954fce6230d820d9 -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -46,7 +46,7 @@
     requires a deposition step, where individual variable-resolution pixels
     are deposited into a buffer of some resolution, to create an image.
     This object is an interface to that pixelization step: it can deposit
-    multiple fields.  It acts as a standard AMRData object, such that
+    multiple fields.  It acts as a standard YTDataContainer object, such that
     dict-style access returns an image of a given field.
 
     Parameters
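
For orientation, a hedged usage sketch of the FixedResolutionBuffer this
docstring describes (the dataset, field, and bounds are placeholders):

    from yt.testing import fake_random_ds
    from yt.visualization.fixed_resolution import FixedResolutionBuffer

    ds = fake_random_ds(64)
    slc = ds.slice(2, 0.5)  # z-normal slice through the domain midplane
    # Deposit the slice's variable-resolution pixels into an 800x800
    # buffer over (xmin, xmax, ymin, ymax); dict-style access then
    # returns the image for a field.
    frb = FixedResolutionBuffer(slc, (0.25, 0.75, 0.25, 0.75), (800, 800))
    print(frb["density"].shape)  # (800, 800)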


https://bitbucket.org/yt_analysis/yt/commits/f1bf7041463f/
Changeset:   f1bf7041463f
Branch:      yt
User:        ngoldbaum
Date:        2015-11-04 22:18:19+00:00
Summary:     Re-linting yt.visualization
Affected #:  4 files

diff -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 -r f1bf7041463f1b1a96469ed7c28002bd1c40a6c2 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -33,11 +33,10 @@
 from yt.units.yt_array import YTQuantity, YTArray
 from yt.visualization.image_writer import apply_colormap
 from yt.utilities.lib.geometry_utils import triangle_plane_intersect
-from yt.analysis_modules.cosmological_observation.light_ray.light_ray \
-     import periodic_ray
+from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \
+    periodic_ray
 from yt.utilities.lib.line_integral_convolution import \
     line_integral_convolution_2d
-import warnings
 
 
 from . import _MPL
@@ -2167,7 +2166,7 @@
         vectors = np.concatenate((pixX[...,np.newaxis],
                                   pixY[...,np.newaxis]),axis=2)
 
-        if self.texture == None:
+        if self.texture is None:
             self.texture = np.random.rand(nx,ny).astype(np.double)
         elif self.texture.shape != (nx,ny):
             raise SyntaxError("'texture' must have the same shape "

diff -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 -r f1bf7041463f1b1a96469ed7c28002bd1c40a6c2 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -13,7 +13,7 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-import base64
+
 import numpy as np
 import matplotlib
 import types
@@ -24,8 +24,6 @@
 from matplotlib.mathtext import MathTextParser
 from numbers import Number
 
-from ._mpl_imports import FigureCanvasAgg
-from .image_writer import apply_colormap
 from .base_plot_types import ImagePlotMPL
 from .fixed_resolution import \
     FixedResolutionBuffer, \
@@ -43,8 +41,6 @@
     DatasetSeries
 from yt.data_objects.image_array import \
     ImageArray
-from yt.extern.six.moves import \
-    StringIO
 from yt.extern.six import string_types
 from yt.frontends.ytdata.data_structures import \
     YTSpatialPlotDataset
@@ -59,8 +55,6 @@
     prefixable_units, latex_prefixes
 from yt.units.yt_array import \
     YTArray, YTQuantity
-from yt.utilities.png_writer import \
-    write_png_to_string
 from yt.utilities.definitions import \
     formatted_length_unit_names
 from yt.utilities.math_utils import \

diff -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 -r f1bf7041463f1b1a96469ed7c28002bd1c40a6c2 yt/visualization/tests/test_splat.py
--- a/yt/visualization/tests/test_splat.py
+++ b/yt/visualization/tests/test_splat.py
@@ -19,7 +19,7 @@
 import numpy as np
 import yt
 from yt.testing import \
-    assert_equal, expand_keywords
+    assert_equal
 from yt.utilities.lib.api import add_rgba_points_to_image
 
 

diff -r 5ef6cc967fe88ea8d60bd9d53e65971b7b4b18b5 -r f1bf7041463f1b1a96469ed7c28002bd1c40a6c2 yt/visualization/volume_rendering/camera_path.py
--- a/yt/visualization/volume_rendering/camera_path.py
+++ b/yt/visualization/volume_rendering/camera_path.py
@@ -16,7 +16,7 @@
 
 import random
 import numpy as np
-from .create_spline import create_spline
+from yt.visualization.volume_rendering.create_spline import create_spline
 
 class Keyframes(object):
     def __init__(self, x, y, z=None, north_vectors=None, up_vectors=None,
@@ -74,7 +74,7 @@
         """
         Nx = len(x)
         Ny = len(y)
-        if z != None:
+        if z is not None:
             Nz = len(z)
             ndims = 3
         else:
@@ -82,18 +82,18 @@
             ndims = 2
         if Nx*Ny*Nz != Nx**ndims:
             print("Need Nx (%d) == Ny (%d) == Nz (%d)" % (Nx, Ny, Nz))
-            sys.exit()
+            raise RuntimeError
         self.nframes = Nx
         self.pos = np.zeros((Nx,3))
         self.pos[:,0] = x
         self.pos[:,1] = y
-        if z != None:
+        if z is not None:
             self.pos[:,2] = z
         else:
             self.pos[:,2] = 0.0
         self.north_vectors = north_vectors
         self.up_vectors = up_vectors
-        if times == None:
+        if times is None:
             self.times = np.arange(self.nframes)
         else:
             self.times = times
@@ -227,7 +227,8 @@
         ----------
         None.
         """
-        self.setup_tsp(niter, init_temp, alpha, fixed_start)
+        # This obviously doesn't work; when someone fixes it, remove the NOQA
+        self.setup_tsp(niter, init_temp, alpha, fixed_start)  # NOQA
         num_eval = 1
         cooling_schedule = self.cooling()
         current = self.tour
@@ -257,9 +258,9 @@
             if done:
                 break
         self.pos = self.pos[self.tour,:]
-        if self.north_vectors != None:
+        if self.north_vectors is not None:
             self.north_vectors = self.north_vectors[self.tour]
-        if self.up_vectors != None:
+        if self.up_vectors is not None:
             self.up_vectors = self.up_vectors[self.tour]
 
     def create_path(self, npoints, path_time=None, tension=0.5, shortest_path=False):
@@ -291,17 +292,17 @@
                      "up_vectors": np.zeros((npoints,3))}
         if shortest_path:
             self.get_shortest_path()
-        if path_time == None:
+        if path_time is None:
             path_time = np.linspace(0, self.nframes, npoints)
         self.path["time"] = path_time
         for dim in range(3):
             self.path["position"][:,dim] = create_spline(self.times, self.pos[:,dim],
                                                          path_time, tension=tension)
-            if self.north_vectors != None:
+            if self.north_vectors is not None:
                 self.path["north_vectors"][:,dim] = \
                     create_spline(self.times, self.north_vectors[:,dim],
                                   path_time, tension=tension)
-            if self.up_vectors != None:
+            if self.up_vectors is not None:
                 self.path["up_vectors"][:,dim] = \
                     create_spline(self.times, self.up_vectors[:,dim],
                                   path_time, tension=tension)
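
For orientation, a hedged sketch of the Keyframes API touched above, with
made-up keyframe positions; the smoothing comes from create_spline as in
the diff:

    import numpy as np
    from yt.visualization.volume_rendering.camera_path import Keyframes

    # Four keyframes on a circle in the z=0.5 plane.
    theta = np.linspace(0.0, 2.0 * np.pi, 4)
    kf = Keyframes(0.5 + 0.3 * np.cos(theta),
                   0.5 + 0.3 * np.sin(theta),
                   z=np.full(4, 0.5))
    # Interpolate a 120-frame camera track through the keyframes.
    kf.create_path(npoints=120, tension=0.5)
    print(kf.path["position"].shape)  # (120, 3)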


https://bitbucket.org/yt_analysis/yt/commits/3764648e1f5b/
Changeset:   3764648e1f5b
Branch:      yt
User:        ngoldbaum
Date:        2015-11-04 22:27:59+00:00
Summary:     Linting the ytdata frontend
Affected #:  5 files

diff -r f1bf7041463f1b1a96469ed7c28002bd1c40a6c2 -r 3764648e1f5ba8f15624aa39d213e7976d539e7c yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -21,7 +21,6 @@
 import numpy as np
 import os
 import stat
-import time
 import weakref
 
 from .fields import \
@@ -40,7 +39,6 @@
     Dataset, \
     ParticleFile
 from yt.extern.six import \
-    iteritems, \
     string_types
 from yt.geometry.grid_geometry_handler import \
     GridIndex
@@ -50,8 +48,6 @@
     YTQuantity
 from yt.utilities.logger import \
     ytLogger as mylog
-from yt.utilities.cosmology import \
-    Cosmology
 from yt.utilities.exceptions import \
     YTFieldTypeNotFound
 from yt.utilities.on_demand_imports import \
@@ -422,7 +418,7 @@
             fields = self._determine_fields(key)
         except YTFieldTypeNotFound:
             return tr
-        finfo = self.ds._get_field_info(*fields[0])
+        self.ds._get_field_info(*fields[0])
         return tr
 
     def get_data(self, fields=None):
@@ -461,7 +457,7 @@
             fields_to_get.append(field)
         if len(fields_to_get) == 0 and len(fields_to_generate) == 0:
             return
-        elif self._locked == True:
+        elif self._locked is True:
             raise GenerationInProgress(fields)
         # Track which ones we want in the end
         ofields = set(list(self.field_data.keys())

diff -r f1bf7041463f1b1a96469ed7c28002bd1c40a6c2 -r 3764648e1f5ba8f15624aa39d213e7976d539e7c yt/frontends/ytdata/fields.py
--- a/yt/frontends/ytdata/fields.py
+++ b/yt/frontends/ytdata/fields.py
@@ -14,9 +14,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
 
-from yt.funcs import mylog
 from yt.fields.field_info_container import \
     FieldInfoContainer
 

diff -r f1bf7041463f1b1a96469ed7c28002bd1c40a6c2 -r 3764648e1f5ba8f15624aa39d213e7976d539e7c yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -14,7 +14,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
 import numpy as np
 
 from yt.extern.six import \
@@ -27,6 +26,8 @@
     BaseIOHandler
 from yt.utilities.lib.geometry_utils import \
     compute_morton
+from yt.utilities.on_demand_imports import \
+    _h5py as h5py
 
 class IOHandlerYTNonspatialhdf5(BaseIOHandler):
     _dataset_type = "ytnonspatialhdf5"
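
The switch from a direct h5py import to yt.utilities.on_demand_imports
defers importing h5py until it is first used. A rough sketch of the idea
(not the actual on_demand_imports implementation):

    class OnDemandModule(object):
        """Proxy that imports the real module on first attribute access."""
        def __init__(self, name):
            self._name = name
            self._module = None

        def __getattr__(self, attr):
            if self._module is None:
                self._module = __import__(self._name)
            return getattr(self._module, attr)

    _h5py = OnDemandModule("h5py")
    # h5py itself is only imported when something like _h5py.File is used.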
@@ -102,7 +103,6 @@
         mylog.debug("Reading %s cells of %s fields in %s grids",
                    size, [f2 for f1, f2 in fields], ng)
         ind = 0
-        h5_type = self._field_dtype
         for chunk in chunks:
             f = None
             for g in chunk.objs:
@@ -202,7 +202,6 @@
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         for data_file in sorted(data_files):
-            all_count = self._count_particles(data_file)
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     x = _get_position_array(ptype, f, "x")
@@ -224,7 +223,7 @@
         ind = 0
         with h5py.File(data_file.filename, "r") as f:
             for ptype in all_count:
-                if not ptype in f or all_count[ptype] == 0: continue
+                if ptype not in f or all_count[ptype] == 0: continue
                 pos = np.empty((all_count[ptype], 3), dtype="float64")
                 pos = data_file.ds.arr(pos, "code_length")
                 if ptype == "grid":
@@ -320,7 +319,7 @@
         ind = 0
         with h5py.File(data_file.filename, "r") as f:
             for ptype in all_count:
-                if not ptype in f or all_count[ptype] == 0: continue
+                if ptype not in f or all_count[ptype] == 0: continue
                 pos = np.empty((all_count[ptype], 3), dtype="float64")
                 pos = data_file.ds.arr(pos, "code_length")
                 if ptype == "grid":

diff -r f1bf7041463f1b1a96469ed7c28002bd1c40a6c2 -r 3764648e1f5ba8f15624aa39d213e7976d539e7c yt/frontends/ytdata/setup.py
--- a/yt/frontends/ytdata/setup.py
+++ b/yt/frontends/ytdata/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r f1bf7041463f1b1a96469ed7c28002bd1c40a6c2 -r 3764648e1f5ba8f15624aa39d213e7976d539e7c yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -14,7 +14,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
 import numpy as np
 
 from yt.funcs import iterable
@@ -137,7 +136,7 @@
             field_name = field[1]
         else:
             field_name = field
-        dataset = _yt_array_hdf5(fh[field_type], field_name, data[field])
+        _yt_array_hdf5(fh[field_type], field_name, data[field])
         if "num_elements" not in fh[field_type].attrs:
             fh[field_type].attrs["num_elements"] = data[field].size
     fh.close()


https://bitbucket.org/yt_analysis/yt/commits/ba6cf032b66b/
Changeset:   ba6cf032b66b
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 03:16:06+00:00
Summary:     Removing tests in the top-level tests folder and old answer testing support

These tests are not run by the current testing infrastructure. In the interest
of clarity to new users (who might think these tests are useful since they are
at the root of the repository) I'm removing them. I've left in the test enzo
datasets to avoid adding 2.4 MB to the repository.
Affected #:  23 files

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -59,7 +59,7 @@
   from yt.analysis_modules.halo_finding.api import *
 
   ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
-  halo_list = parallelHF(ds)
+  halo_list = HaloFinder(ds)
   halo_list.dump('MyHaloList')
 
 Ellipsoid Parameters

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -501,11 +501,7 @@
 subtle art in estimating the amount of memory needed for halo finding, but a
 rule of thumb is that the HOP halo finder is the most memory intensive
 (:func:`HaloFinder`), and Friends of Friends (:func:`FOFHaloFinder`) being the
-most memory-conservative.  It has been found that :func:`parallelHF` needs
-roughly 1 MB of memory per 5,000 particles, although recent work has improved
-this and the memory requirement is now smaller than this. But this is a good
-starting point for beginning to calculate the memory required for halo-finding.
-For more information, see :ref:`halo_finding`.
+most memory-conservative. For more information, see :ref:`halo_finding`.
 
 **Volume Rendering**
 

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db tests/README
--- /dev/null
+++ b/tests/README
@@ -0,0 +1,3 @@
+This directory contains two tiny enzo cosmological datasets. 
+
+They were added a long time ago and are provided for testing purposes.
\ No newline at end of file

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db tests/boolean_regions.py
--- a/tests/boolean_regions.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.boolean_region_tests import \
-    TestBooleanANDGridQuantity, TestBooleanORGridQuantity, \
-    TestBooleanNOTGridQuantity, TestBooleanANDParticleQuantity, \
-    TestBooleanORParticleQuantity, TestBooleanNOTParticleQuantity
-
-create_test(TestBooleanANDGridQuantity, "BooleanANDGrid")
-
-create_test(TestBooleanORGridQuantity, "BooleanORGrid")
-
-create_test(TestBooleanNOTGridQuantity, "BooleanNOTGrid")
-
-create_test(TestBooleanANDParticleQuantity, "BooleanANDParticle")
-
-create_test(TestBooleanORParticleQuantity, "BooleanORParticle")
-
-create_test(TestBooleanNOTParticleQuantity, "BooleanNOTParticle")

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db tests/fields_to_test.py
--- a/tests/fields_to_test.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# We want to test several things.  We need to be able to run the
-
-field_list = ["Density", "Temperature", "x-velocity", "y-velocity",
-    "z-velocity",
-    # Now some derived fields
-    "Pressure", "SoundSpeed", "particle_density", "Entropy",
-    # Ghost zones
-    "AveragedDensity", "DivV"]
-
-particle_field_list = ["particle_position_x", "ParticleMassMsun"]

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db tests/halos.py
--- a/tests/halos.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.halo_tests import \
-    TestHaloCountHOP, TestHaloCountFOF, TestHaloCountPHOP
-
-create_test(TestHaloCountHOP, "halo_count_HOP", threshold=80.0)
-
-create_test(TestHaloCountFOF, "halo_count_FOF", link=0.2, padding=0.02)
-
-create_test(TestHaloCountPHOP, "halo_count_PHOP", threshold=80.0)

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db tests/hierarchy_consistency.py
--- a/tests/hierarchy_consistency.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTDatasetTest, RegressionTestException
-from yt.funcs import ensure_list
-
-
-class HierarchyInconsistent(RegressionTestException):
-    pass
-
-
-class HierarchyConsistency(YTDatasetTest):
-    name = "index_consistency"
-
-    def run(self):
-        self.result = \
-            all(g in ensure_list(c.Parent) for g in self.ds.index.grids
-                                            for c in g.Children)
-
-    def compare(self, old_result):
-        if not(old_result and self.result): raise HierarchyInconsistent()
-
-
-class GridLocationsProperties(YTDatasetTest):
-    name = "level_consistency"
-
-    def run(self):
-        self.result = dict(grid_left_edge=self.ds.grid_left_edge,
-                           grid_right_edge=self.ds.grid_right_edge,
-                           grid_levels=self.ds.grid_levels,
-                           grid_particle_count=self.ds.grid_particle_count,
-                           grid_dimensions=self.ds.grid_dimensions)
-
-    def compare(self, old_result):
-        # We allow no difference between these values.
-        self.compare_data_arrays(self.result, old_result, 0.0)
-
-
-class GridRelationshipsChanged(RegressionTestException):
-    pass
-
-
-class GridRelationships(YTDatasetTest):
-
-    name = "grid_relationships"
-
-    def run(self):
-        self.result = [[p.id for p in ensure_list(g.Parent) \
-            if g.Parent is not None]
-            for g in self.ds.index.grids]
-
-    def compare(self, old_result):
-        if len(old_result) != len(self.result):
-            raise GridRelationshipsChanged()
-        for plist1, plist2 in zip(old_result, self.result):
-            if len(plist1) != len(plist2): raise GridRelationshipsChanged()
-            if not all((p1 == p2 for p1, p2 in zip(plist1, plist2))):
-                raise GridRelationshipsChanged()
-
-
-class GridGlobalIndices(YTDatasetTest):
-    name = "global_startindex"
-
-    def run(self):
-        self.result = na.array([g.get_global_startindex()
-                                for g in self.ds.index.grids])
-
-    def compare(self, old_result):
-        self.compare_array_delta(old_result, self.result, 0.0)

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db tests/object_field_values.py
--- a/tests/object_field_values.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import hashlib
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTDatasetTest, RegressionTestException, create_test
-from yt.funcs import ensure_list, iterable
-from fields_to_test import field_list, particle_field_list
-
-
-class FieldHashesDontMatch(RegressionTestException):
-    pass
-
-known_objects = {}
-
-
-def register_object(func):
-    known_objects[func.__name__] = func
-    return func
-
-
- at register_object
-def centered_sphere(tobj):
-    center = 0.5 * (tobj.ds.domain_right_edge + tobj.ds.domain_left_edge)
-    width = (tobj.ds.domain_right_edge - tobj.ds.domain_left_edge).max()
-    tobj.data_object = tobj.ds.sphere(center, width / 0.25)
-
-
- at register_object
-def off_centered_sphere(tobj):
-    center = 0.5 * (tobj.ds.domain_right_edge + tobj.ds.domain_left_edge)
-    width = (tobj.ds.domain_right_edge - tobj.ds.domain_left_edge).max()
-    tobj.data_object = tobj.ds.sphere(center - 0.25 * width, width / 0.25)
-
-
- at register_object
-def corner_sphere(tobj):
-    width = (tobj.ds.domain_right_edge - tobj.ds.domain_left_edge).max()
-    tobj.data_object = tobj.ds.sphere(tobj.ds.domain_left_edge, width / 0.25)
-
-
- at register_object
-def disk(self):
-    center = (self.ds.domain_right_edge + self.ds.domain_left_edge) / 2.
-    radius = (self.ds.domain_right_edge - self.ds.domain_left_edge).max() / 10.
-    height = (self.ds.domain_right_edge - self.ds.domain_left_edge).max() / 10.
-    normal = na.array([1.] * 3)
-    self.data_object = self.ds.disk(center, normal, radius, height)
-
-
- at register_object
-def all_data(self):
-    self.data_object = self.ds.all_data()
-
-_new_known_objects = {}
-for field in ["Density"]:  # field_list:
-    for object_name in known_objects:
-
-        def _rfunc(oname, fname):
-
-            def func(tobj):
-                known_objects[oname](tobj)
-                tobj.orig_data_object = tobj.data_object
-                avg_value = tobj.orig_data_object.quantities[
-                        "WeightedAverageQuantity"](fname, "Density")
-                tobj.data_object = tobj.orig_data_object.cut_region(
-                        ["grid['%s'] > %s" % (fname, avg_value)])
-            return func
-        _new_known_objects["%s_cut_region_%s" % (object_name, field)] = \
-                _rfunc(object_name, field)
-known_objects.update(_new_known_objects)
-
-
-class YTFieldValuesTest(YTDatasetTest):
-
-    def run(self):
-        vals = self.data_object[self.field].copy()
-        vals.sort()
-        self.result = hashlib.sha256(vals.tostring()).hexdigest()
-
-    def compare(self, old_result):
-        if self.result != old_result: raise FieldHashesDontMatch
-
-    def setup(self):
-        YTDatasetTest.setup(self)
-        known_objects[self.object_name](self)
-
-
-class YTExtractIsocontoursTest(YTFieldValuesTest):
-
-    def run(self):
-        val = self.data_object.quantities["WeightedAverageQuantity"](
-            "Density", "Density")
-        rset = self.data_object.extract_isocontours("Density",
-            val, rescale=False, sample_values="Temperature")
-        self.result = rset
-
-    def compare(self, old_result):
-        if self.result[0].size == 0 and old_result[0].size == 0:
-            return True
-        self.compare_array_delta(self.result[0].ravel(),
-                                 old_result[0].ravel(), 1e-7)
-        self.compare_array_delta(self.result[1], old_result[1], 1e-7)
-
-
-class YTIsocontourFluxTest(YTFieldValuesTest):
-
-    def run(self):
-        val = self.data_object.quantities["WeightedAverageQuantity"](
-            "Density", "Density")
-        flux = self.data_object.calculate_isocontour_flux(
-           "Density", val, "x-velocity", "y-velocity", "z-velocity")
-        self.result = flux
-
-    def compare(self, old_result):
-        self.compare_value_delta(self.result, old_result, 1e-7)
-
-for object_name in known_objects:
-    for field in field_list + particle_field_list:
-        if "cut_region" in object_name and field in particle_field_list:
-            continue
-        create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
-                    field=field, object_name=object_name)
-    create_test(YTExtractIsocontoursTest, "%s" % (object_name),
-                object_name=object_name)
-    create_test(YTIsocontourFluxTest, "%s" % (object_name),
-                object_name=object_name)
-
-
-class YTDerivedQuantityTest(YTDatasetTest):
-
-    def setup(self):
-        YTDatasetTest.setup(self)
-        known_objects[self.object_name](self)
-
-    def compare(self, old_result):
-        if hasattr(self.result, 'tostring'):
-            self.compare_array_delta(self.result, old_result, 1e-7)
-            return
-        elif iterable(self.result):
-            a1 = na.array(self.result)
-            a2 = na.array(old_result)
-            self.compare_array_delta(a1, a2, 1e-7)
-        else:
-            if self.result != old_result: raise FieldHashesDontMatch
-
-    def run(self):
-        # This only works if it takes no arguments
-        self.result = self.data_object.quantities[self.dq_name]()
-
-dq_names = ["TotalMass", "AngularMomentumVector", "CenterOfMass",
-            "BulkVelocity", "BaryonSpinParameter", "ParticleSpinParameter"]
-
-# Extrema, WeightedAverageQuantity, TotalQuantity, MaxLocation,
-# MinLocation
-
-for object_name in known_objects:
-    for dq in dq_names:
-        # Some special exceptions
-        if "cut_region" in object_name and (
-            "SpinParameter" in dq or
-            "TotalMass" in dq):
-            continue
-        create_test(YTDerivedQuantityTest, "%s_%s" % (object_name, dq),
-                    dq_name=dq, object_name=object_name)
-
-
-class YTDerivedQuantityTestField(YTDerivedQuantityTest):
-
-    def run(self):
-        self.result = self.data_object.quantities[self.dq_name](
-            self.field_name)
-
-for object_name in known_objects:
-    for field in field_list:
-        for dq in ["Extrema", "TotalQuantity", "MaxLocation", "MinLocation"]:
-            create_test(YTDerivedQuantityTestField,
-                        "%s_%s" % (object_name, field),
-                        field_name=field, dq_name=dq,
-                        object_name=object_name)
-
-
-class YTDerivedQuantityTest_WeightedAverageQuantity(YTDerivedQuantityTest):
-
-    def run(self):
-        self.result = self.data_object.quantities["WeightedAverageQuantity"](
-            self.field_name, weight="CellMassMsun")
-
-for object_name in known_objects:
-    for field in field_list:
-        create_test(YTDerivedQuantityTest_WeightedAverageQuantity,
-                    "%s_%s" % (object_name, field),
-                    field_name=field,
-                    object_name=object_name)
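
A note on the module deleted above: the _rfunc indirection in
object_field_values.py works around Python's late-binding closures --
oname and fname must be captured once per loop iteration.  A
self-contained sketch of the same pattern, with illustrative names:

    funcs = {}
    for name in ["a", "b"]:
        def _make(captured):
            # The factory freezes "captured" for this iteration; closing
            # over "name" directly would see only the final value, "b".
            def func():
                return captured
            return func
        funcs[name] = _make(name)

    print(funcs["a"](), funcs["b"]())  # prints: a b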

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db tests/projections.py
--- a/tests/projections.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestOffAxisProjection, TestSlice, \
-    TestRay, TestGasDistribution, Test2DGasDistribution
-
-from fields_to_test import field_list
-
-for field in field_list:
-    create_test(TestRay, "%s" % field, field=field)
-
-for axis in range(3):
-    for field in field_list:
-        create_test(TestSlice, "%s_%s" % (axis, field),
-                    field=field, axis=axis)
-
-for axis in range(3):
-    for field in field_list:
-        create_test(TestProjection, "%s_%s" % (axis, field),
-                    field=field, axis=axis)
-        create_test(TestProjection, "%s_%s_Density" % (axis, field),
-                    field=field, axis=axis, weight_field="Density")
-
-for field in field_list:
-    create_test(TestOffAxisProjection, "%s" % field,
-                field=field)
-    create_test(TestOffAxisProjection, "%s_Density" % field,
-                field=field, weight_field="Density")
-
-for field in field_list:
-    if field != "Density":
-        create_test(TestGasDistribution, "density_%s" % field,
-                    field_x="Density", field_y=field)
-    if field not in ("x-velocity", "Density"):
-        create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
-                    field_x="Density", field_y="x-velocity", field_z=field,
-                    weight="CellMassMsun")

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db tests/runall.py
--- a/tests/runall.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import matplotlib
-matplotlib.use('Agg')
-from yt.config import ytcfg
-ytcfg["yt", "loglevel"] = "50"
-ytcfg["yt", "serialize"] = "False"
-
-from yt.utilities.answer_testing.api import \
-    RegressionTestRunner, clear_registry, create_test, \
-    TestFieldStatistics, TestAllProjections, registry_entries, \
-    Xunit
-from yt.utilities.command_line import get_yt_version
-
-from yt.mods import *
-import fnmatch
-import imp
-import optparse
-import itertools
-import time
-
-#
-# We assume all tests are to be run, unless explicitly given the name of a
-# single test or something that can be run through fnmatch.
-#
-# Keep in mind that we use a different nomenclature here than is used in the
-# Enzo testing system.  Our 'tests' are actually tests that are small and that
-# run relatively quickly on a single dataset; in Enzo's system, a 'test'
-# encompasses both the creation and the examination of data.  Here we assume
-# the data is kept constant.
-#
-
-cwd = os.path.dirname(globals().get("__file__", os.getcwd()))
-
-
-def load_tests(iname, idir):
-    f, filename, desc = imp.find_module(iname, [idir])
-    tmod = imp.load_module(iname, f, filename, desc)
-    return tmod
-
-
-def find_and_initialize_tests():
-    mapping = {}
-    for f in glob.glob(os.path.join(cwd, "*.py")):
-        clear_registry()
-        iname = os.path.basename(f[:-3])
-        try:
-            load_tests(iname, cwd)
-            mapping[iname] = registry_entries()
-            #print "Associating %s with" % (iname)
-            #print "\n    ".join(registry_entries())
-        except ImportError:
-            pass
-    return mapping
-
-if __name__ == "__main__":
-    clear_registry()
-    mapping = find_and_initialize_tests()
-    test_storage_directory = ytcfg.get("yt", "test_storage_dir")
-    try:
-        my_hash = get_yt_version()
-    except Exception:
-        my_hash = "UNKNOWN%s" % (time.time())
-    parser = optparse.OptionParser()
-    parser.add_option("-f", "--parameter-file", dest="parameter_file",
-        default=os.path.join(cwd, "DD0010/moving7_0010"),
-        help="The parameter file value to feed to 'load' to test against")
-    parser.add_option("-l", "--list", dest="list_tests", action="store_true",
-        default=False, help="List all tests and then exit")
-    parser.add_option("-t", "--tests", dest="test_pattern", default="*",
-        help="The test name pattern to match.  Can include wildcards.")
-    parser.add_option("-o", "--output", dest="storage_dir",
-        default=test_storage_directory,
-        help="Base directory for storing test output.")
-    parser.add_option("-c", "--compare", dest="compare_name",
-        default=None,
-        help="The name against which we will compare")
-    parser.add_option("-n", "--name", dest="this_name",
-        default=my_hash,
-        help="The name we'll call this set of tests")
-    opts, args = parser.parse_args()
-
-    if opts.list_tests:
-        tests_to_run = []
-        for m, vals in mapping.items():
-            new_tests = fnmatch.filter(vals, opts.test_pattern)
-            if len(new_tests) == 0: continue
-            load_tests(m, cwd)
-            keys = set(registry_entries())
-            tests_to_run += [t for t in new_tests if t in keys]
-        tests = list(set(tests_to_run))
-        print ("\n    ".join(tests))
-        sys.exit(0)
-
-    # Load the test ds and make sure it's good.
-    ds = load(opts.parameter_file)
-    if ds is None:
-        print "Couldn't load the specified parameter file."
-        sys.exit(1)
-
-    # Now we modify our compare name and self name to include the ds.
-    compare_id = opts.compare_name
-    watcher = None
-    if compare_id is not None:
-        compare_id += "_%s_%s" % (ds, ds._hash())
-        watcher = Xunit()
-    this_id = opts.this_name + "_%s_%s" % (ds, ds._hash())
-
-    rtr = RegressionTestRunner(this_id, compare_id,
-                               results_path=opts.storage_dir,
-                               compare_results_path=opts.storage_dir,
-                               io_log=[opts.parameter_file])
-
-    rtr.watcher = watcher
-    tests_to_run = []
-    for m, vals in mapping.items():
-        new_tests = fnmatch.filter(vals, opts.test_pattern)
-
-        if len(new_tests) == 0: continue
-        load_tests(m, cwd)
-        keys = set(registry_entries())
-        tests_to_run += [t for t in new_tests if t in keys]
-    for test_name in sorted(tests_to_run):
-        print "RUNNING TEST", test_name
-        rtr.run_test(test_name)
-    if watcher is not None:
-        rtr.watcher.report()
-    failures = 0
-    passes = 0
-    for test_name, result in sorted(rtr.passed_tests.items()):
-        if not result:
-            print "TEST %s: %s" % (test_name, result)
-            print "    %s" % rtr.test_messages[test_name]
-        if result: passes += 1
-        else: failures += 1
-    print "Number of passes  : %s" % passes
-    print "Number of failures: %s" % failures

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db tests/volume_rendering.py
--- a/tests/volume_rendering.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from yt.mods import *
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTDatasetTest, RegressionTestException
-from yt.funcs import ensure_list
-
-
-class VolumeRenderingInconsistent(RegressionTestException):
-    pass
-
-
-class VolumeRenderingConsistency(YTDatasetTest):
-    name = "volume_rendering_consistency"
-
-    def run(self):
-        c = (self.ds.domain_right_edge + self.ds.domain_left_edge) / 2.
-        W = na.sqrt(3.) * (self.ds.domain_right_edge - \
-            self.ds.domain_left_edge)
-        N = 512
-        n_contours = 5
-        cmap = 'algae'
-        field = 'Density'
-        mi, ma = self.ds.all_data().quantities['Extrema'](field)[0]
-        mi, ma = na.log10(mi), na.log10(ma)
-        contour_width = (ma - mi) / 100.
-        L = na.array([1.] * 3)
-        tf = ColorTransferFunction((mi - 2, ma + 2))
-        tf.add_layers(n_contours, w=contour_width,
-                      col_bounds=(mi * 1.001, ma * 0.999),
-                      colormap=cmap, alpha=na.logspace(-1, 0, n_contours))
-        cam = self.ds.camera(c, L, W, (N, N), transfer_function=tf,
-            no_ghost=True)
-        image = cam.snapshot()
-        # image = cam.snapshot('test_rendering_%s.png'%field)
-        self.result = image
-
-    def compare(self, old_result):
-        # Compare the deltas; give a leeway of 1e-8
-        delta = na.nanmax(na.abs(self.result - old_result) /
-                                 (self.result + old_result))
-        if delta > 1e-9: raise VolumeRenderingInconsistent()

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db yt/utilities/answer_testing/README
--- a/yt/utilities/answer_testing/README
+++ /dev/null
@@ -1,275 +0,0 @@
-Enzo Regression Test Runner
-===========================
-
-This is an evolving sketch of how Enzo regression tests might work.  They will
-be based on a Python test runner, called from LCA test, that will output images
-as well as success/failure for a set of tests.
-
-The interface is still evolving, but we're working on creating something that
-is fun, clear, and easy to write tests for.
-
-This is still a work in progress!  Things might change without notice!
-
-What Is A Test And How To Write One
------------------------------------
-
-A test, at its most fundamental level, computes a value from one or more
-outputs of a simulation and then compares that value against the value
-computed from a previous run of the simulation.
-
-Each test follows a fixed interface, but we're trying to provide a couple of
-mechanisms to make test writing easier.  To implement a test, you have to
-define a python class that subclasses from a particular type of test case.
-
-Your new test must implement the following interface, or it will fail:
-
-    name
-        All tests have to have the variable "name" defined in the class
-        definition.  This is a unique key that identifies the test, and it is
-        used to self-register every test in a global registry.  This will play
-        into filenames, so it's for the best if it doesn't contain spaces or
-        other unacceptable filename characters.
-
-    setup(self)
-        If you subclass from the YT test case or another test case base
-        class that implements setup, this may not be necessary.  This is
-        where all the pre-testing operations occur, and it is useful if you
-        want to write a bunch of tests that share the same setup.  No return
-        value is needed.
-
-    run(self)
-        This is where the testing occurs and some value generated -- this value
-        can be an array, a number, a string, any Python or NumPy base type.
-        (For various reasons, YT objects can't be considered results, only
-        their base components.)  When this value is prepared, it needs to be
-        stored as the property "result" on the object -- for example, you might
-        do self.result = some_time_average.  No return value is needed.
-
-    compare(self, old_result)
-        This routine compares the newly computed result against the value
-        from a previous run.  It can be assumed that the "old_result" was
-        constructed in an identical "run" function, so direct comparison can be
-        made.  No return value is needed, but instead it is assumed that in
-        case of failure an exception that subclasses from
-        RegressionTestException will be raised -- however, the usage of
-        operations like compare_array_delta and compare_value_delta is
-        encouraged because they will handle the appropriate exception raising.
-
-    plot(self)
-        This function is optional, but it is used to generate an image from a
-        test.  The return value is the filename of the created image.
-
-Helpful Functions For Test Writing
-----------------------------------
-
-All test cases supply several base sets of operations:
-
-  * compare_array_delta(array1, array2, tolerance)
-        This computes
-            max(abs(array1-array2)/(array1+array2))
-        and fails if that is greater than tolerance.  Set tolerance to 0.0 for
-        an exact comparison.
-            
-  * compare_value_delta(value1, value2, tolerance)
-        This computes
-            abs(value1-value2)/(value1+value2)
-        and fails if that is greater than tolerance.  Set tolerance to 0.0 for
-        an exact comparison.
-
-Currently, a few exist:
-
-    SingleOutputTest
-        This is a test case designed to handle a single test.
-
-        Additional Attributes:
-          * filename => The dataset to test
-
-        Additional Methods:
-          * None
-        
-    MultipleOutputTest
-        This is a test case designed to handle multiple tests.
-
-        Additional Attributes:
-          * io_log => The IO log from the simulation
-
-        Additional Methods:
-          * __iter__ => You can iterate over the test case:
-                 for filename in self:
-                     ...
-                to have it return all the filenames in the IO log.
-        
-    YTDatasetTest
-        This test case is designed to work with YT, and provides a couple
-        additional things that YT can provide.
-
-        Additional Attributes:
-          * sim_center => The center of the simulation, from the domain left
-                          and right edges.
-          * max_dens_location => The point of highest density.
-
-          * entire_simulation => A data object containing the entire
-                                 simulation.
-
-        Additional Methods:
-          * pixelize(data_source, field, edges, dims) =>
-                This returns a (dims[0], dims[1]) array constructed from the
-                variable resolution (projection or slice) data object.  Edges
-                are in code units, (px_min, px_max, py_min, py_max) and default
-                to the entire domain.  dims is a tuple, (Nx, Ny).
-
-          * compare_data_arrays(d1, d2, tolerance) =>
-                yt often stores arrays hanging off dictionaries.  This accepts
-                d1 and d2, which are dictionarys with arrays as values, and
-                compares all the arrays using compare_array_delta, with
-                given tolerance.
-
-Sample Tests
-------------
-
-There are some example tests in the distribution.  But, a simple test case
-would also work well.  This is a test case using yt to find the maximum density
-in the simulation.  Note that we don't have to provide a setup function, as
-that's taken care of in the base class (YTDatasetTest).
-
-    class TestMaximumDensity(YTDatasetTest):
-        name = "maximum_density"
-
-        def run(self):
-            # self.ds already exists
-            value, center = self.ds.find_max("density")
-            self.result = (value, center)
-
-        def compare(self, old_result):
-            value, center = self.result
-            old_value, old_center = old_result
-
-            # We want our old max density to agree with our new max density to
-            # a relative difference of 1e-7.
-            self.compare_value_delta(value, old_value, 1e-7)
-
-            # Now we check if our center has moved.
-            self.compare_array_delta(center, old_center, 1e-7)
-
-        def plot(self):
-            # There's not much to plot, so we just return an empty list.
-            return []
-
-Running Tests
--------------
-
-Subclasses of RegressionTest are *self-registering*, which means the test
-runner can find them by name.  Two classes are provided: the test runner
-itself, and a thin wrapper around a shelf from the shelve module.  To run a
-series of tests, you need to instantiate a RegressionTestRunner and then tell
-it which tests to run.
-
-If the runner has a set of results against which to compare, it will do so.
-For every test, it will perform the following actions:
-
-    1. setup()
-    2. run()
-    3. plot(), store list of filenames in self.plot_list[test_name]
-    4. store test.results
-    5. test.compare(old_results), if a compare_id is supplied
-
-If a test is of type SingleOutputTest, or a subclass, this test will be run for
-every single output in the IO log.  If it is a MultipleOutputTest, only one
-instance of each test will be executed.
-
-The RegressionTestRunner has a public interface:
-
-    RegressionTestRunner:
-        __init__(id, compare_id, results_path, io_log)
-            The id is the unique id for this test case, which will be used for
-            the name of the results database.  The compare_id (optional) is the
-            id of the results database against which we will compare.  The
-            results_path is the path to the directory in which results sets are
-            stored, defaulting to the current directory.  io_log, defaulting to
-            "OutputLog", is the IO log from Enzo that lists all of the outputs.
-
-        run_test(name):
-            The test corresponding to that test name is run.
-
-        run_all_tests()
-            This runs all of the tests that have been registered.  Every time a
-            test is defined, it is registered -- so this list can get quite
-            long!  But, by selectively importing 'plugin' modules, the full
-            list of tests can be controlled.
-
-        run_tests_from_file(filename):
-            Every line in a filename is parsed, and if it matches a test name
-            in the test registry, it will be run.
-
-The included sample script run_tests.py will instantiate a test runner, run it
-once on a set of outputs, and then run it again comparing against the results
-from the first run.  This should always succeed, but it gives an idea of how to
-go about running tests.
-
-Test Creation Convenience Functions
------------------------------------
-
-Because of the self-registering nature of the tests, we can very conveniently
-create new ones just by subclassing.  But, subclassing a lot of tests can be a
-bit annoying!  So the create_test function has been created.
-
-Going back to our example of the maximum density location function, we could
-rewrite it slightly to make it work with the create_test function.  We remove
-the name, and we declare our parameter, field, as a class attribute we don't set.
-
-    class TestMaximumValue(YTDatasetTest):
-
-        field = None
-
-        def run(self):
-            # self.ds already exists
-            value, center = self.ds.find_max(self.field)
-            self.result = (value, center)
-
-        def compare(self, old_result):
-            value, center = self.result
-            old_value, old_center = old_result
-
-            # We want our old max density to agree with our new max density to
-            # a relative difference of 1e-7.
-            self.compare_value_delta(value, old_value, 1e-7)
-
-            # Now we check if our center has moved.
-            self.compare_array_delta(center, old_center, 1e-7)
-
-        def plot(self):
-            # There's not much to plot, so we just return an empty list.
-            return []
-
-Note that it's mostly the same, but we are using self.field to find the maximum
-value instead of hard-coding the field to Density.  We also don't specify 'name' so
-that this base class won't be registered.  We can now use create_test to make a
-bunch, setting "field" to anything we want, and naming them anything we want:
-
-    for field in ["Temperature", "x-velocity", "y-velocity", "z-velocity"]:
-        create_test(TestMaximumValue, "maximum_%s_test" % field,
-                    field = field)
-
-This makes and then registers tests of the name format given, which are then
-accessible through the test runner.  See the projection and gas distribution
-test creations in hydro_tests.py for a few more examples of how to use this.
-
-TODO
-====
-
-This is still fairly bare bones!  There are some fun areas we can expand into:
-
-    * We need more tests!  More than that, we need tests that know something
-      about the different test *problems*.  We'll need lists of tests to run
-      for every single problem type.
-    * Sometimes the results database acts oddly and can't add a new value.
-    * The source tree needs to be re-organized and this README file turned into
-      documentation that includes every test in the main distribution.
-    * Doc strings need to be added to all functions and classes.  Comments for
-      all tests need to be included.
-    * More explicit test naming and running.
-    * Generation of HTML pages including all the plots and the results, along
-      with download links.  This should be done with LCA test.
-    * Plots should be zipped up and removed from the file system.  The zipfile
-      module would work great for this.
-    * And lots more ...
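
A note for readers of the README deleted above: its runner-based
interface is replaced by the nose plugin in framework.py.  As a rough
sketch, the TestMaximumDensity example would now be written as a
generator-style answer test; GenericArrayTest, requires_ds, and
data_dir_load are names from the current framework module, and the
dataset path is the same placeholder used elsewhere in this changeset:

    from yt.utilities.answer_testing.framework import \
        requires_ds, data_dir_load, GenericArrayTest

    ds_fn = "DD0010/moving7_0010"  # placeholder dataset

    @requires_ds(ds_fn)
    def test_maximum_density():
        ds = data_dir_load(ds_fn)

        def max_density():
            # Same quantity the old TestMaximumDensity.run computed.
            value, center = ds.find_max("density")
            return [value, center]

        # The returned arrays are stored on the first run and compared
        # against the stored answers on subsequent runs.
        yield GenericArrayTest(ds, max_density)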

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -13,29 +13,5 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from .runner import \
-    RegressionTestRunner, \
-    RegressionTestStorage, \
-    clear_registry, \
-    registry_entries
+from yt.utilities.answer_testing.framework import AnswerTesting
 
-from .output_tests import \
-    YTDatasetTest, \
-    create_test
-
-from .default_tests import \
-    TestFieldStatistics, \
-    TestAllProjections
-
-from .xunit import \
-    Xunit
-
-from .halo_tests import \
-    TestHaloCompositionHashHOP, \
-    TestHaloCompositionHashFOF, \
-    TestHaloCompositionHashPHOP
-
-try:
-    from .framework import AnswerTesting
-except ImportError:
-    raise
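
With the old runner gone, the public API shrinks to the nose plugin.  A
minimal sketch of how it is now consumed (AnswerTesting is the plugin
class imported above, exposed by nose as --with-answer-testing; the
target module here is illustrative):

    import nose
    from yt.utilities.answer_testing.api import AnswerTesting

    # Registering the plugin adds the --with-answer-testing option,
    # which enables answer comparison during the test run.
    nose.run(argv=["nosetests", "--with-answer-testing",
                   "yt.frontends.enzo.tests"],
             addplugins=[AnswerTesting()])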

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db yt/utilities/answer_testing/default_tests.py
--- a/yt/utilities/answer_testing/default_tests.py
+++ /dev/null
@@ -1,61 +0,0 @@
-"""
-Default tests
-
-
-
-"""
-from __future__ import absolute_import
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.mods import *
-from .output_tests import YTDatasetTest, create_test
-
-class TestFieldStatistics(YTDatasetTest):
-
-    tolerance = None
-
-    def run(self):
-        # We're going to calculate the field statistics for every single field.
-        results = {}
-        for field in self.ds.field_list:
-            # Do it here so that it gets wiped each iteration
-            dd = self.ds.all_data() 
-            results[field] = (dd[field].std(),
-                              dd[field].mean(),
-                              dd[field].min(),
-                              dd[field].max())
-        self.result = results
-
-    def compare(self, old_result):
-        for field in sorted(self.result):
-            for i in range(4):
-                oi = old_result[field][i]
-                ni = self.result[field][i]
-                self.compare_value_delta(oi, ni, self.tolerance)
-
-class TestAllProjections(YTDatasetTest):
-
-    tolerance = None
-
-    def run(self):
-        results = {}
-        for field in self.ds.field_list:
-            if self.ds.field_info[field].particle_type: continue
-            results[field] = []
-            for ax in range(3):
-                t = self.ds.proj(field, ax)
-                results[field].append(t.field_data)
-        self.result = results
-
-    def compare(self, old_result):
-        for field in sorted(self.result):
-            for p1, p2 in zip(self.result[field], old_result[field]):
-                self.compare_data_arrays(p1, p2, self.tolerance)
-

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -15,7 +15,9 @@
 #-----------------------------------------------------------------------------
 
 import logging
+import numpy as np
 import os
+import time
 import hashlib
 import contextlib
 import sys
@@ -27,17 +29,26 @@
 
 from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
-from yt.testing import *
+from yt.funcs import \
+    get_pbar
+from yt.testing import \
+    assert_equal, \
+    assert_allclose_units, \
+    assert_rel_equal, \
+    assert_almost_equal
 from yt.convenience import load, simulation
 from yt.config import ytcfg
 from yt.data_objects.static_output import Dataset
 from yt.data_objects.time_series import SimulationTimeSeries
+from yt.utilities.exceptions import \
+    YTNoOldAnswer, \
+    YTCloudError, \
+    YTOutputNotIdentified
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
 
 import matplotlib.image as mpimg
 import yt.visualization.plot_window as pw
-import yt.extern.progressbar as progressbar
 
 mylog = logging.getLogger('nose.plugins.answer-testing')
 run_big_data = False
@@ -171,7 +182,7 @@
         url = _url_path.format(self.reference_name, ds_name)
         try:
             resp = urllib.request.urlopen(url)
-        except urllib.error.HTTPError as ex:
+        except urllib.error.HTTPError:
             raise YTNoOldAnswer(url)
         else:
             for this_try in range(3):
@@ -658,7 +669,7 @@
     for i in range(num_images):
         mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[i])))
         mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[i])))
-        assert compare_images(fns[0], fns[1], 10**(-decimals)) == None
+        assert compare_images(fns[0], fns[1], 10**(-decimals)) is None
         for fn in fns: os.remove(fn)
 
 class PlotWindowAttributeTest(AnswerTestingTest):
@@ -769,7 +780,7 @@
         return lambda: None
     def ftrue(func):
         return func
-    if run_big_data == False and big_data == True:
+    if run_big_data is False and big_data is True:
         return ffalse
     elif not can_run_sim(sim_fn, sim_type, file_check):
         return ffalse
@@ -781,7 +792,7 @@
         return lambda: None
     def ftrue(func):
         return func
-    if run_big_data == False and big_data == True:
+    if run_big_data is False and big_data is True:
         return ffalse
     elif not can_run_ds(ds_fn, file_check):
         return ffalse
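
The comparison cleanups in this file follow standard Python practice:
None, True, and False are singletons, so identity tests (is) are
preferred over equality (==), which classes and arrays can overload.  A
toy illustration, separate from the changeset:

    import numpy as np

    a = np.arange(3)
    # "a == None" dispatches to ndarray.__eq__ and tries to broadcast;
    # "a is None" unambiguously asks whether the name is bound to None.
    print(a is None)     # False
    print(None is None)  # True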

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db yt/utilities/answer_testing/halo_tests.py
--- a/yt/utilities/answer_testing/halo_tests.py
+++ /dev/null
@@ -1,166 +0,0 @@
-from __future__ import absolute_import
-from yt.mods import *
-import matplotlib
-import pylab
-from .output_tests import SingleOutputTest, YTDatasetTest, create_test
-from yt.analysis_modules.halo_finding.api import *
-import hashlib
-import numpy as np
-
-# Tests the number of halos returned by the HOP halo finder on a dataset
-class TestHaloCountHOP(YTDatasetTest):
-    threshold = 80.0
-
-    def run(self):
-        # Find the halos using vanilla HOP.
-        halos = HaloFinder(self.ds, threshold=self.threshold, dm_only=False)
-        # We only care about the number of halos.
-        self.result = len(halos)
-                    
-    def compare(self, old_result):
-        # The new value should be identical to the old one.
-        self.compare_value_delta(self.result, old_result, 0)
-
-    def plot(self):
-        return []
-
-# Tests the number of halos returned by the FOF halo finder on a dataset
-class TestHaloCountFOF(YTDatasetTest):
-    link = 0.2
-    padding = 0.02
-
-    def run(self):
-        # Find the halos using FOF.
-        halos = FOFHaloFinder(self.ds, link=self.link, dm_only=False, 
-                               padding=self.padding)
-        # We only care about the number of halos.
-        self.result = len(halos)
-                    
-    def compare(self, old_result):
-        # The new value should be identical to the old one.
-        self.compare_value_delta(self.result, old_result, 0)
-
-    def plot(self):
-        return []
-
-# Tests the number of halos returned by the Parallel HOP halo finder on a 
-# dataset
-class TestHaloCountPHOP(YTDatasetTest):
-    threshold = 80.0
-
-    def run(self):
-        # Find the halos using parallel HOP.
-        halos = parallelHF(self.ds, threshold=self.threshold, dm_only=False)
-        # We only care about the number of halos.
-        self.result = len(halos)
-                    
-    def compare(self, old_result):
-        # The new value should be identical to the old one.
-        self.compare_value_delta(self.result, old_result, 0)
-
-    def plot(self):
-        return []
-
-class TestHaloComposition(YTDatasetTest):
-    threshold=80.0
-    
-    def run(self):
-        # Find the halos using vanilla HOP.
-        halos = HaloFinder(self.ds, threshold=self.threshold, dm_only=False)
-        # The result is a list of the particle IDs, stored
-        # as sets for easy comparison.
-        IDs = []
-        for halo in halos:
-            IDs.append(set(halo["particle_index"]))
-        self.result = IDs
-    
-    def compare(self, old_result):
-        # All the sets should be identical.
-        pairs = zip(self.result, old_result)
-        for pair in pairs:
-            if len(pair[0] - pair[1]) != 0:
-                return False
-        return True
-    
-# Tests the content of the halos returned by the HOP halo finder on a dataset 
-# by comparing the hash of the arrays of all the particles contained in each
-# halo.  Evidently breaks on parallel runtime.  DO NOT USE.
-class TestHaloCompositionHashHOP(YTDatasetTest):
-    threshold=80.0
-    
-    def run(self):
-        # Find the halos using vanilla HOP.
-        halos = HaloFinder(self.ds, threshold=self.threshold, dm_only=False)
-        # The result is a flattened array of the arrays of the particle IDs for
-        # each halo
-        IDs = []
-        for halo in halos:
-            IDs.append(halo["particle_index"])
-        IDs = np.concatenate(IDs)
-        self.result = IDs
-    
-    def compare(self, old_result):
-        # All the lists of arrays should be identical.  To check this
-        # faster, we take the 256-bit hash of these lists and compare them
-        result_hash = hashlib.sha256(self.result.tostring()).hexdigest()
-        old_result_hash = hashlib.sha256(old_result.tostring()).hexdigest()
-        if result_hash == old_result_hash:
-            return True
-        else:
-            return False
-
-# Tests the content of the halos returned by the FOF halo finder on a dataset 
-# by comparing the hash of the arrays of all the particles contained in each
-# halo.  Evidently breaks on parallel runtime.  DO NOT USE.
-class TestHaloCompositionHashFOF(YTDatasetTest):
-    link = 0.2
-    padding = 0.02
-    
-    def run(self):
-        # Find the halos using vanilla FOF.
-        halos = FOFHaloFinder(self.ds, link=self.link, dm_only=False, 
-                               padding=self.padding)
-        # The result is a flattened array of the arrays of the particle IDs for
-        # each halo
-        IDs = []
-        for halo in halos:
-            IDs.append(halo["particle_index"])
-        IDs = np.concatenate(IDs)
-        self.result = IDs
-    
-    def compare(self, old_result):
-        # All the lists of arrays should be identical.  To check this
-        # faster, we take the 256-bit hash of these lists and compare them
-        result_hash = hashlib.sha256(self.result.tostring()).hexdigest()
-        old_result_hash = hashlib.sha256(old_result.tostring()).hexdigest()
-        if result_hash == old_result_hash:
-            return True
-        else:
-            return False
-
-# Tests the content of the halos returned by the Parallel HOP halo finder on a 
-# dataset by comparing the hash of the arrays of all the particles contained 
-# in each halo.  Evidently breaks on parallel runtime.  DO NOT USE.
-class TestHaloCompositionHashPHOP(YTDatasetTest):
-    threshold=80.0
-    
-    def run(self):
-        # Find the halos using parallel HOP.
-        halos = parallelHF(self.ds, threshold=self.threshold, dm_only=False)
-        # The result is a flattened array of the arrays of the particle IDs for
-        # each halo
-        IDs = []
-        for halo in halos:
-            IDs.append(halo["particle_index"])
-        IDs = np.concatenate(IDs)
-        self.result = IDs
-    
-    def compare(self, old_result):
-        # All the lists of arrays should be identical.  To check this
-        # faster, we take the 256-bit hash of these lists and compare them
-        result_hash = hashlib.sha256(self.result.tostring()).hexdigest()
-        old_result_hash = hashlib.sha256(old_result.tostring()).hexdigest()
-        if result_hash == old_result_hash:
-            return True
-        else:
-            return False

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db yt/utilities/answer_testing/hydro_tests.py
--- a/yt/utilities/answer_testing/hydro_tests.py
+++ /dev/null
@@ -1,196 +0,0 @@
-"""
-Hydro tests
-
-
-
-"""
-from __future__ import absolute_import
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import matplotlib
-import pylab
-from yt.mods import *
-from .output_tests import SingleOutputTest, YTDatasetTest, create_test
-
-class TestProjection(YTDatasetTest):
-
-    field = None
-    axis = None
-    weight_field = None
-
-    def run(self):
-        # First we get our flattened projection -- this is the
-        # Density, px, py, pdx, and pdy
-        proj = self.ds.proj(self.field, self.axis, 
-                              weight_field=self.weight_field)
-        # Now let's stick it in a buffer
-        pixelized_proj = self.pixelize(proj, self.field)
-        # We just want the values, so this can be stored
-        # independently of the dataset.
-        # The .field_data attribute strips out everything other than the
-        # actual array values.
-        self.result = (proj.field_data, pixelized_proj.data)
-
-    def compare(self, old_result):
-        proj, pixelized_proj = self.result
-        oproj, opixelized_proj = old_result
-
-        self.compare_data_arrays(proj, oproj)
-        self.compare_array_delta(
-            pixelized_proj[self.field],
-            opixelized_proj[self.field],
-            1e-7)
-
-    def plot(self):
-        pylab.clf()
-        pylab.imshow(self.result[1][self.field],
-            interpolation='nearest', origin='lower')
-        fn = "%s_%s_%s_projection.png" % (self.ds, self.field,
-                                          self.weight_field)
-        pylab.savefig(fn)
-        return [fn]
-
-class TestOffAxisProjection(YTDatasetTest):
-
-    field = None
-    weight_field = None
-
-    def run(self):
-        # Here proj will just be the data array.
-        proj = off_axis_projection(self.ds, 
-                                   (0.5 * (self.ds.domain_left_edge + 
-                                           self.ds.domain_right_edge)),
-                                   [1., 1., 1.], 1., 400,
-                                   self.field, weight=self.weight_field)
-
-        # We just want the array values.
-        self.result = proj
-
-    def compare(self, old_result):
-        proj  = self.result
-        oproj = old_result
-
-        self.compare_array_delta(proj, oproj, 1e-7)
-
-    def plot(self):
-        fn = "%s_%s_%s_off-axis_projection.png" % \
-            (self.ds, self.field, self.weight_field)
-        write_image(self.result, fn)
-        return [fn]
-
-class TestRay(YTDatasetTest):
-
-    field = None
-
-    def run(self):
-        np.random.seed(4333)
-        start_point = np.random.random(self.ds.dimensionality) * \
-            (self.ds.domain_right_edge - self.ds.domain_left_edge) + \
-            self.ds.domain_left_edge
-        end_point   = np.random.random(self.ds.dimensionality) * \
-            (self.ds.domain_right_edge - self.ds.domain_left_edge) + \
-            self.ds.domain_left_edge
-
-        # Here proj will just be the data array.
-        ray = self.ds.ray(start_point, end_point, field=self.field)
-
-        # We just want the array values.
-        self.result = ray[self.field]
-
-    def compare(self, old_result):
-        ray  = self.result
-        oray = old_result
-
-        self.compare_array_delta(ray, oray, 1e-7)
-
-    def plot(self):
-        return
-
-class TestSlice(YTDatasetTest):
-
-    field = None
-    axis = None
-
-    def run(self):
-        # Here proj will just be the data array.
-        slice = self.ds.slice(self.axis, 
-                                (0.5 * (self.ds.domain_left_edge + 
-                                        self.ds.domain_right_edge))[self.axis],
-                                fields=self.field)
-        # We just want the array values.
-        self.result = slice.field_data
-
-    def compare(self, old_result):
-        slice  = self.result
-        oslice = old_result
-
-        self.compare_data_arrays(slice, oslice)
-
-    def plot(self):
-        fn = "%s_%s_slice.png" % (self.ds, self.field)
-        write_image(self.result[self.field], fn)
-        return [fn]
-
-# Now we create all our tests.  We are using the create_test
-# function, which is a relatively simple function that takes the base class,
-# a name, and any parameters that the test requires.
-for axis in range(3):
-    for field in ["density", "temperature"]:
-        create_test(TestProjection, "projection_test_%s_%s" % (axis, field),
-                    field = field, axis = axis)
-
-class TestGasDistribution(YTDatasetTest):
-    field_x = None
-    field_y = None
-    weight = "cell_mass"
-    n_bins = 32
-
-    def run(self):
-        # We're NOT going to use the low-level profiling API here,
-        # because we are avoiding the calculations of min/max,
-        # as those should be tested in another test.
-        pc = PlotCollection(self.ds, center=self.sim_center)
-        p = pc.add_profile_object(self.entire_simulation,
-            [self.field_x, self.field_y], x_bins = self.n_bins,
-            weight=self.weight)
-        # The arrays are all stored in a dictionary hanging off the profile
-        # object
-        self.result = p.data.field_data
-                    
-    def compare(self, old_result):
-        self.compare_data_arrays(
-            self.result, old_result)
-
-    def plot(self):
-        return []
-
-# Now we create all our tests, but we're only going to check the binning
-# against Density for now.
-for field in ["temperature", "velocity_x"]:
-    create_test(TestGasDistribution, "profile_density_test_%s" % field,
-                field_x = "density", field_y = field)
-
-class Test2DGasDistribution(TestGasDistribution):
-    x_bins = 128
-    y_bins = 128
-    field_z = "cell_mass"
-    weight = None
-    def run(self):
-        # We're NOT going to use the low-level profiling API here,
-        # because we are avoiding the calculations of min/max,
-        # as those should be tested in another test.
-        pc = PlotCollection(self.ds, center=self.sim_center)
-        p = pc.add_phase_object(self.entire_simulation,
-            [self.field_x, self.field_y, self.field_z], x_bins = self.x_bins, y_bins = self.y_bins,
-            weight=self.weight)
-        # The arrays are all stored in a dictionary hanging off the profile
-        # object
-        self.result = p.data.field_data
-

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ /dev/null
@@ -1,224 +0,0 @@
-"""
-Base classes for answer testing
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import matplotlib
-from yt.mods import *
-
-# We first create our dictionary of tests to run.  This starts out empty, and
-# as tests are imported it will be filled.
-if "TestRegistry" not in locals():
-    class TestRegistry(dict):
-        def __new__(cls, *p, **k):
-            if '_the_instance' not in cls.__dict__:
-                cls._the_instance = dict.__new__(cls)
-            return cls._the_instance
-if "test_registry" not in locals():
-    test_registry = TestRegistry()
-
-# The exceptions we raise, related to the character of the failure.
-
-class RegressionTestException(Exception):
-    pass
-
-class ValueDelta(RegressionTestException):
-    def __init__(self, delta, acceptable):
-        self.delta = delta
-        self.acceptable = acceptable
-
-    def __repr__(self):
-        return "ValueDelta: Delta %s, max of %s" % (
-            self.delta, self.acceptable)
-
-class ArrayDelta(ValueDelta):
-    def __repr__(self):
-        nabove = len(np.where(self.delta > self.acceptable)[0])
-        return "ArrayDelta: Delta max of %s, acceptable of %s.\n" \
-               "%d of %d points above the acceptable limit" % \
-               (np.nanmax(self.delta), self.acceptable, nabove,
-                self.delta.size)
-
-class ShapeMismatch(RegressionTestException):
-    def __init__(self, old_shape, current_shape):
-        self.old_shape = old_shape
-        self.current_shape = current_shape
-
-    def __repr__(self):
-        return "Shape Mismatch: old_buffer %s, current_buffer %s" % (
-            self.old_shape, self.current_shape)
-
-class RegressionTest(object):
-    name = None
-    result = None
-    output_type = None
-
-    class __metaclass__(type):
-        # This ensures that all the tests are auto-registered if they have a
-        # name.  If they do not have a name, they are considered to be base
-        # classes to be overridden and implemented by someone else.
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            if cls.name is not None:
-                test_registry[cls.name] = cls
-
-    def setup(self):
-        """
-        This function must be defined if the problem requires additional setup.
-        Note that for the most part this will be defined in base classes where
-        subclasses will only implement 'run'.
-        """
-        pass
-
-    def run(self):
-        """
-        This function must generate a result value, of any type, and store it
-        in self.result.
-        """
-        pass
-
-    def compare(self, old_result):
-        """
-        This function must accept `old_result` and compare it somehow against
-        the value stored in `self.result`.  If the result is a failure, it must
-        raise an exception.  Otherwise it is considered to be a success.
-        """
-        pass
-
-    def plot(self):
-        """
-        This function can optionally plot the contents of `self.result`.
-        """
-        pass
-
-    def compare_array_delta(self, a1, a2, acceptable):
-        """
-        This is a helper function.  It accepts two numpy arrays and compares
-        the maximum relative difference.  If the maximum relative difference is
-        greater than `acceptable` it is considered a failure and an appropriate
-        exception is raised.
-        """
-        if a1.shape != a2.shape:
-            raise ShapeMismatch(a1, a2)
-        delta = np.abs(a1 - a2).astype("float64")/(a1 + a2)
-        if np.nanmax(delta) > acceptable:
-            raise ArrayDelta(delta, acceptable)
-        return True
-
-    def compare_value_delta(self, v1, v2, acceptable):
-        """
-        This is a helper function.  It accepts two floating point values and
-        calculates the relative difference.  If the relative difference
-        is greater than `acceptable` it is considered a failure and
-        an appropriate exception is raised.
-        """
-        delta = np.abs(v1 - v2)/(v1 + v2)
-        if delta > acceptable:
-            raise ValueDelta(delta, acceptable)
-        return True
-
-class SingleOutputTest(RegressionTest):
-    output_type = 'single'
-
-    def __init__(self, filename):
-        """
-        This test mechanism is designed to accept a single filename and
-        evaluate it, not necessarily utilizing yt's functionality to do so.
-        """
-        self.filename = filename
-
-class MultipleOutputTest(RegressionTest):
-    output_type = 'multiple'
-
-    io_log_header = "DATASET WRITTEN"
-
-    def __init__(self, io_log):
-        """
-        This test mechanism is designed to accept an OutputLog file and then
-        iterate over it, evaluating every single dataset individually.
-        """
-        self.io_log = io_log
-
-    def __iter__(self):
-        if isinstance(self.io_log, str):
-            for line in open(self.io_log):
-                yield line[len(self.io_log_header):].split()[0].strip()
-        elif isinstance(self.io_log, list):
-            for line in self.io_log: yield line
-
-def create_test(base, new_name, **attrs):
-    """
-    This function accepts a base class of a test, sets some attributes on it,
-    and then registers a new test.  It's a fast way of registering multiple
-    tests that share the same testing logic but that differ on a few parameters
-    or combinations of parameters.
-    """
-    new_name = "%s_%s" % (base.__name__, new_name)
-    attrs['name'] = new_name
-    return type(new_name, (base,), attrs)
-
-class YTDatasetTest(SingleOutputTest):
-
-    def setup(self):
-        self.ds = load(self.filename)
-
-    def pixelize(self, data, field, edges = None, dims = (512, 512)):
-        """
-        This is a helper function that returns a 2D array of the specified
-        source, in the specified field, at the specified spatial extent.
-        """
-        xax = self.ds.coordinates.x_axis[self.axis]
-        yax = self.ds.coordinates.y_axis[self.axis]
-        
-        if edges is None:
-            edges = (self.ds.domain_left_edge[xax],
-                     self.ds.domain_right_edge[xax],
-                     self.ds.domain_left_edge[yax],
-                     self.ds.domain_right_edge[yax])
-        frb = FixedResolutionBuffer( data, edges, dims)
-        frb[field] # To make the pixelization
-        return frb
-
-    def compare_data_arrays(self, d1, d2, tol = 1e-7):
-        """
-        This is a helper function.  It accepts two dictionaries of numpy arrays
-        and compares the maximum relative difference of every array.  If the
-        maximum relative difference is greater than `tol` it is
-        considered a failure and an appropriate exception is raised.
-        """
-        for field in d1.keys():
-            self.compare_array_delta(d1[field], d2[field], tol)
-
-    @property
-    def sim_center(self):
-        """
-        This returns the center of the domain.
-        """
-        return 0.5*(self.ds.domain_right_edge + self.ds.domain_left_edge)
-
-    @property
-    def max_dens_location(self):
-        """
-        This is a helper function to return the location of the most dense
-        point.
-        """
-        return self.ds.find_max("density")[1]
-
-    @property
-    def entire_simulation(self):
-        """
-        Return a data object (all_data) that covers the entire domain.
-        """
-        return self.ds.all_data()
-        
-

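The removed create_test helper leans on the three-argument form of type() to
mint test classes at runtime; a standalone sketch of the pattern, with
illustrative names:

    class BaseTest(object):
        name = "base"
        tol = 1e-7
        def run(self):
            print("running %s with tol %g" % (self.name, self.tol))

    def create_test(base, new_name, **attrs):
        # type(name, bases, dict) builds a class exactly as a class
        # statement would; attrs become class attributes.
        new_name = "%s_%s" % (base.__name__, new_name)
        attrs["name"] = new_name
        return type(new_name, (base,), attrs)

    LooseTest = create_test(BaseTest, "loose", tol=1e-3)
    LooseTest().run()   # prints: running BaseTest_loose with tol 0.001
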
diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db yt/utilities/answer_testing/particle_tests.py
--- a/yt/utilities/answer_testing/particle_tests.py
+++ /dev/null
@@ -1,60 +0,0 @@
-from __future__ import absolute_import
-import matplotlib
-from yt.mods import *
-import pylab
-from .output_tests import SingleOutputTest, YTDatasetTest, create_test
-
-class TestParticleUniqueIDs(YTDatasetTest):
-
-    def run(self):
-        # Test to make sure that all the particles have unique IDs.
-        all = self.ds.all_data()
-        IDs = all["particle_index"]
-        # Make sure the order is the same every time.
-        IDs = IDs[IDs.argsort()]
-        self.result = IDs
-                    
-    def compare(self, old_result):
-        # Two things: there should be no repeats in either the new or
-        # the old, and the two sets should be the same.
-        if len(old_result) != len(set(old_result)): return False
-        if len(self.result) != len(set(self.result)): return False
-        if (self.result != old_result).any(): return False
-        return True
-
-    def plot(self):
-        return []
-
-create_test(TestParticleUniqueIDs, "particle_unique_ids_test")
-
-class TestParticleExtrema(YTDatasetTest):
-
-    def run(self):
-        # Tests to make sure that particle positions aren't changing
-        # drastically. This is very unlikely to be a problem.
-        all = self.ds.all_data()
-        min = np.empty(3,dtype='float64')
-        max = min.copy()
-        dims = ["particle_position_x","particle_position_y",
-            "particle_position_z"]
-        for i in range(3):
-            min[i] = np.min(all[dims[i]])
-            max[i] = np.max(all[dims[i]])
-        self.result = (min,max)
-    
-    def compare(self, old_result):
-        min,max = self.result
-        old_min, old_max = old_result
-        # The extrema should be very similar.
-        self.compare_array_delta(min, old_min, 1e-7)
-        self.compare_array_delta(max, old_max, 1e-7)
-        # Also, the min/max shouldn't be outside the boundaries.
-        if (min < self.ds.domain_left_edge).any(): return False
-        if (max > self.ds.domain_right_edge).any(): return False
-        return True
-    
-    def plot(self):
-        return []
-
-create_test(TestParticleExtrema, "particle_extrema_test")
-

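The uniqueness check in the removed test can be phrased directly in numpy; a
minimal standalone sketch, assuming a plain integer array of particle indices:

    import numpy as np

    def ids_are_unique(ids):
        # np.unique returns the sorted distinct values, so the sizes
        # match exactly when no ID repeats.
        ids = np.asarray(ids)
        return np.unique(ids).size == ids.size

    ids_are_unique([1, 2, 3])     # True
    ids_are_unique([1, 2, 2, 3])  # False
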
diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db yt/utilities/answer_testing/run_tests.py
--- a/yt/utilities/answer_testing/run_tests.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from __future__ import absolute_import
-from yt.config import ytcfg
-ytcfg["yt","loglevel"] = '50'
-ytcfg["yt","suppressStreamLogging"] = 'True'
-
-from . import hydro_tests # Just importing will register the tests!
-from . import halo_tests
-from . import particle_tests
-from .runner import RegressionTestRunner
-
-first_runner = RegressionTestRunner("first")
-first_runner.run_all_tests()
-second_runner = RegressionTestRunner("second", "first")
-second_runner.run_all_tests()

diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db yt/utilities/answer_testing/runner.py
--- a/yt/utilities/answer_testing/runner.py
+++ /dev/null
@@ -1,183 +0,0 @@
-"""
-Runner mechanism for answer testing
-
-
-
-"""
-from __future__ import print_function
-from __future__ import absolute_import
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import matplotlib
-import os, shelve, sys, imp, tempfile
-from yt.extern.six.moves import cPickle
-
-from yt.config import ytcfg; ytcfg["yt","serialize"] = "False"
-from yt.funcs import *
-from yt.utilities.command_line import YTCommand
-from .xunit import Xunit
-
-from .output_tests import test_registry, MultipleOutputTest, \
-                         RegressionTestException
-
-def clear_registry():
-    test_registry.clear()
-
-class FileNotExistException(Exception):
-    def __init__(self, filename):
-        self.filename = filename
-
-    def __repr__(self):
-        return "FileNotExistException: %s" % (self.filename)
-
-
-def registry_entries():
-    return test_registry.keys()
-
-class RegressionTestStorage(object):
-    def __init__(self, results_id, path = "."):
-        self.id = results_id
-        if results_id == "":
-            self._path = os.path.join(path, "results")
-        else:
-            self._path = os.path.join(path, "results_%s" % self.id)
-        if not os.path.isdir(self._path): 
-            only_on_root(os.mkdir, self._path)
-        if os.path.isfile(self._path): raise RuntimeError
-
-    def _fn(self, tn):
-        return os.path.join(self._path, tn)
-
-    @rootonly
-    def __setitem__(self, test_name, result):
-        # We have to close our shelf manually,
-        # as the destructor does not necessarily do this.
-        # Context managers would be more appropriate.
-        f = open(self._fn(test_name), "wb")
-        cPickle.dump(result, f, protocol=-1)
-        f.close()
-
-    def __getitem__(self, test_name):
-        if not os.path.exists(self._fn(test_name)):
-            raise FileNotExistException(self._fn(test_name))
-        f = open(self._fn(test_name), "rb")
-        tr = cPickle.load(f)
-        f.close()
-        return tr
-
-class RegressionTestRunner(object):
-    def __init__(self, results_id, compare_id = None,
-                 results_path = ".", compare_results_path = ".",
-                 io_log = "OutputLog", plot_tests = False):
-        # This test runner assumes it has been launched with the current
-        # working directory that of the test case itself.
-        self.io_log = io_log
-        self.id = results_id
-        if compare_id is not None:
-            self.old_results = RegressionTestStorage(
-                                    compare_id, path=compare_results_path)
-        else:
-            self.old_results = None
-        self.results = RegressionTestStorage(results_id, path=results_path)
-        self.plot_list = {}
-        self.passed_tests = {}
-        self.test_messages = {}
-        self.plot_tests = plot_tests
-
-    def run_all_tests(self):
-        for name in sorted(test_registry):
-            self.run_test(name)
-        return self.passed_tests
-
-    def run_test(self, name):
-        # We'll also need to call the "compare" operation,
-        # but for that we'll need a data store.
-        test = test_registry[name]
-        if test.output_type == 'single':
-            mot = MultipleOutputTest(self.io_log)
-            for i,fn in enumerate(mot):
-                # This next line is to keep the shelve module
-                # from happily gobbling the disk
-                #if i > 5: break 
-                test_instance = test(fn)
-                test_instance.name = "%s_%s" % (
-                    os.path.basename(fn), test_instance.name )
-                self._run(test_instance)
-
-        elif test.output_type == 'multiple':
-            test_instance = test(self.io_log)
-            self._run(test_instance)
-
-    watcher = None
-    def _run(self, test):
-        if self.watcher is not None:
-            self.watcher.start()
-        print(self.id, "Running", test.name, end=' ')
-        test.setup()
-        test.run()
-        if self.plot_tests:
-            self.plot_list[test.name] = test.plot()
-        self.results[test.name] = test.result
-        success, msg, exc = self._compare(test)
-        if self.old_results is None:
-            print("NO OLD RESULTS")
-        else:
-            if success == True: print("SUCCEEDED")
-            else: print("FAILED", msg)
-        self.passed_tests[test.name] = success
-        self.test_messages[test.name] = msg
-        if self.watcher is not None:
-            if success == True:
-                self.watcher.addSuccess(test.name)
-            else:
-                self.watcher.addFailure(test.name, exc)
-
-    def _compare(self, test):
-        if self.old_results is None:
-            return (True, "", "New Test")
-        try:
-            old_result = self.old_results[test.name]
-        except FileNotExistException:
-            return (False, "old result not found", sys.exc_info())
-        try:
-            test.compare(old_result)
-        except RegressionTestException as exc:
-            return (False, repr(exc), sys.exc_info())
-        return (True, "", "Pass")
-
-    def run_tests_from_file(self, filename):
-        for line in open(filename):
-            test_name = line.strip()
-            if test_name not in test_registry:
-                if test_name[0] != "#":
-                    print("Test '%s' not recognized, skipping" % (test_name))
-                continue
-            print("Running '%s'" % (test_name))
-            self.run_test(line.strip())
-
-def _load_modules(test_modules):
-    for fn in test_modules:
-        if fn.endswith(".py"): fn = fn[:-3]
-        print("Loading module %s" % (fn))
-        mname = os.path.basename(fn)
-        f, filename, desc = imp.find_module(mname, [os.path.dirname(fn)])
-        project = imp.load_module(mname, f, filename, desc)
-
-def _update_io_log(opts, kwargs):
-    if opts.datasets is None or len(opts.datasets) == 0: return
-    f = tempfile.NamedTemporaryFile()
-    kwargs['io_log'] = f.name
-    for d in opts.datasets:
-        fn = os.path.expanduser(d)
-        print("Registered dataset %s" % fn)
-        f.write("DATASET WRITTEN %s\n" % fn)
-    f.flush()
-    f.seek(0)
-    return f

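The comment in __setitem__ above is right that context managers are the
cleaner pattern; a minimal standalone sketch of the same pickle-per-test store
written that way (hypothetical class name, plain pickle in place of the six
cPickle shim):

    import os
    import pickle

    class ResultStore(object):
        def __init__(self, path):
            self.path = path
            if not os.path.isdir(path):
                os.mkdir(path)

        def __setitem__(self, test_name, result):
            # 'with' guarantees the file is closed even if pickling raises.
            with open(os.path.join(self.path, test_name), "wb") as f:
                pickle.dump(result, f, protocol=-1)

        def __getitem__(self, test_name):
            with open(os.path.join(self.path, test_name), "rb") as f:
                return pickle.load(f)
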
diff -r 3764648e1f5ba8f15624aa39d213e7976d539e7c -r ba6cf032b66bb752790911c472f688781997b8db yt/utilities/answer_testing/setup.py
--- a/yt/utilities/answer_testing/setup.py
+++ b/yt/utilities/answer_testing/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/1f21d9777d3f/
Changeset:   1f21d9777d3f
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 03:18:52+00:00
Summary:     Fixing flake issues in analysis modules that have come up in the last few weeks
Affected #:  4 files

diff -r ba6cf032b66bb752790911c472f688781997b8db -r 1f21d9777d3f9f9e2364e4c7fdab634f91b320f8 yt/analysis_modules/photon_simulator/tests/test_spectra.py
--- a/yt/analysis_modules/photon_simulator/tests/test_spectra.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_spectra.py
@@ -1,9 +1,8 @@
 from yt.analysis_modules.photon_simulator.api import \
     TableApecModel, XSpecThermalModel
-import numpy as np
 from yt.testing import requires_module, fake_random_ds
 from yt.utilities.answer_testing.framework import \
-    GenericArrayTest, data_dir_load
+    GenericArrayTest
 from yt.config import ytcfg
 
 def setup():

diff -r ba6cf032b66bb752790911c472f688781997b8db -r 1f21d9777d3f9f9e2364e4c7fdab634f91b320f8 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -19,11 +19,10 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
-from yt.units.yt_array import YTQuantity
-from yt.funcs import fix_axis, mylog, iterable, get_pbar
+from yt.funcs import fix_axis, mylog, get_pbar
 from yt.visualization.volume_rendering.camera import off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     communication_system, parallel_root_only
+    communication_system, parallel_root_only
 from yt import units
 from yt.utilities.on_demand_imports import _astropy
 

diff -r ba6cf032b66bb752790911c472f688781997b8db -r 1f21d9777d3f9f9e2364e4c7fdab634f91b320f8 yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -12,8 +12,17 @@
 
 from yt.frontends.stream.api import load_uniform_grid
 from yt.funcs import get_pbar
-from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, \
-    mh, cm_per_km, kboltz, Tcmb, hcgs, clight, sigma_thompson
+from yt.utilities.physical_ratios import \
+    cm_per_kpc, \
+    K_per_keV, \
+    cm_per_km
+from yt.utilities.physical_constants import \
+    mh, \
+    kboltz, \
+    Tcmb, \
+    hcgs, \
+    clight, \
+    sigma_thompson
 from yt.testing import requires_module, assert_almost_equal
 from yt.utilities.answer_testing.framework import requires_ds, \
     GenericArrayTest, data_dir_load, GenericImageTest

diff -r ba6cf032b66bb752790911c472f688781997b8db -r 1f21d9777d3f9f9e2364e4c7fdab634f91b320f8 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -26,7 +26,9 @@
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 
-import math, inspect, time
+import math
+import inspect
+import time
 from collections import defaultdict
 
 sep = 12


https://bitbucket.org/yt_analysis/yt/commits/c8d8bba22d1c/
Changeset:   c8d8bba22d1c
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 03:20:58+00:00
Summary:     Linting yt/utilities/lib
Affected #:  7 files

diff -r 1f21d9777d3f9f9e2364e4c7fdab634f91b320f8 -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -1,8 +1,9 @@
 #!/usr/bin/env python
 from __future__ import print_function
-import setuptools
-import os, sys, os.path, glob, \
-    tempfile, subprocess, shutil
+import os
+import tempfile
+import subprocess
+import shutil
 
 def check_for_openmp():
     # Create a temporary directory
@@ -49,7 +50,7 @@
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('lib',parent_package,top_path)
-    if check_for_openmp() == True:
+    if check_for_openmp() is True:
         omp_args = ['-fopenmp']
     else:
         omp_args = None

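For context, check_for_openmp() probes the toolchain by compiling a small
OpenMP test program in a temporary directory. A simplified standalone sketch
of that kind of probe; the compiler name and test source here are
assumptions, not the code in setup.py:

    import os
    import shutil
    import subprocess
    import tempfile

    def have_openmp(compiler="cc"):
        tmpdir = tempfile.mkdtemp()
        try:
            src = os.path.join(tmpdir, "probe.c")
            with open(src, "w") as f:
                f.write("#include <omp.h>\n"
                        "int main(void) { return omp_get_max_threads() > 0; }\n")
            with open(os.devnull, "w") as devnull:
                # Exit code 0 means -fopenmp compiled and linked cleanly.
                rc = subprocess.call([compiler, "-fopenmp", src,
                                      "-o", os.path.join(tmpdir, "probe")],
                                     stdout=devnull, stderr=devnull)
            return rc == 0
        finally:
            shutil.rmtree(tmpdir)
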
diff -r 1f21d9777d3f9f9e2364e4c7fdab634f91b320f8 -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f yt/utilities/lib/tests/test_alt_ray_tracers.py
--- a/yt/utilities/lib/tests/test_alt_ray_tracers.py
+++ b/yt/utilities/lib/tests/test_alt_ray_tracers.py
@@ -1,12 +1,11 @@
 """Tests for non-cartesian ray tracers."""
-import nose
 import numpy as np
 
-from nose.tools import assert_equal, assert_not_equal, assert_raises, raises, \
-    assert_almost_equal, assert_true, assert_false, assert_in, assert_less_equal, \
-    assert_greater_equal
-from numpy.testing import assert_array_equal, assert_array_almost_equal
-from yt.testing import amrspace
+from yt.testing import \
+    assert_true, \
+    amrspace, \
+    assert_less_equal, \
+    assert_equal
 
 from yt.utilities.lib.alt_ray_tracers import cylindrical_ray_trace, _cyl2cart
 

diff -r 1f21d9777d3f9f9e2364e4c7fdab634f91b320f8 -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f yt/utilities/lib/tests/test_bitarray.py
--- a/yt/utilities/lib/tests/test_bitarray.py
+++ b/yt/utilities/lib/tests/test_bitarray.py
@@ -1,6 +1,7 @@
+import numpy as np
+
 import yt.utilities.lib.bitarray as ba
-import numpy as np
-from yt.testing import *
+from yt.testing import assert_equal
 
 def test_inout_bitarray():
     # Check that we can do it for bitarrays that are funny-shaped

diff -r 1f21d9777d3f9f9e2364e4c7fdab634f91b320f8 -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f yt/utilities/lib/tests/test_fill_region.py
--- a/yt/utilities/lib/tests/test_fill_region.py
+++ b/yt/utilities/lib/tests/test_fill_region.py
@@ -1,4 +1,6 @@
-from yt.testing import *
+import numpy as np
+
+from yt.testing import assert_equal
 from yt.utilities.lib.misc_utilities import fill_region
 
 NDIM = 32

diff -r 1f21d9777d3f9f9e2364e4c7fdab634f91b320f8 -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f yt/utilities/lib/tests/test_geometry_utils.py
--- a/yt/utilities/lib/tests/test_geometry_utils.py
+++ b/yt/utilities/lib/tests/test_geometry_utils.py
@@ -1,4 +1,9 @@
-from yt.testing import *
+import numpy as np
+
+from yt.testing import \
+    fake_random_ds, \
+    assert_array_less, \
+    assert_array_equal
 from yt.utilities.lib.misc_utilities import obtain_rvec, obtain_rv_vec
 
 _fields = ("density", "velocity_x", "velocity_y", "velocity_z")

diff -r 1f21d9777d3f9f9e2364e4c7fdab634f91b320f8 -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f yt/utilities/lib/tests/test_ragged_arrays.py
--- a/yt/utilities/lib/tests/test_ragged_arrays.py
+++ b/yt/utilities/lib/tests/test_ragged_arrays.py
@@ -1,6 +1,10 @@
-from yt.testing import *
 import numpy as np
+
 from yt.utilities.lib.ragged_arrays import index_unop
+from yt.testing import \
+    assert_equal, \
+    assert_rel_equal
+
 
 operations = ((np.sum, "sum"),
               (np.prod, "prod"),

diff -r 1f21d9777d3f9f9e2364e4c7fdab634f91b320f8 -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f yt/utilities/lib/tests/test_sample.py
--- a/yt/utilities/lib/tests/test_sample.py
+++ b/yt/utilities/lib/tests/test_sample.py
@@ -1,7 +1,7 @@
 import numpy as np
 
-from yt.testing import *
 from yt.utilities.lib.CICDeposit import CICSample_3
+from yt.testing import assert_allclose
 
 def setup():
     pass

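The pattern across these test-suite edits is mechanical but worth stating:
replace wildcard imports with the names a module actually uses, so flake8 can
see the real dependency surface. Schematically:

    # Before: an unknown set of names lands in the module namespace,
    # and unused-import/undefined-name checks are blind to it.
    from yt.testing import *

    # After: dependencies are explicit and lintable.
    import numpy as np
    from yt.testing import assert_equal, fake_random_ds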

https://bitbucket.org/yt_analysis/yt/commits/cf09468df56a/
Changeset:   cf09468df56a
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 03:21:20+00:00
Summary:     Linting yt/utilities/tests
Affected #:  10 files

diff -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f -r cf09468df56aded70dc71e292314708713171244 yt/utilities/tests/test_chemical_formulas.py
--- a/yt/utilities/tests/test_chemical_formulas.py
+++ b/yt/utilities/tests/test_chemical_formulas.py
@@ -1,4 +1,4 @@
-from yt.testing import *
+from yt.testing import assert_equal
 from yt.utilities.chemical_formulas import ChemicalFormula
 from yt.utilities.periodic_table import periodic_table
 

diff -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f -r cf09468df56aded70dc71e292314708713171244 yt/utilities/tests/test_coordinate_conversions.py
--- a/yt/utilities/tests/test_coordinate_conversions.py
+++ b/yt/utilities/tests/test_coordinate_conversions.py
@@ -1,4 +1,6 @@
-from yt.testing import *
+import numpy as np
+
+from yt.testing import assert_array_almost_equal
 from yt.utilities.math_utils import \
     get_sph_r_component, \
     get_sph_theta_component, \

diff -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f -r cf09468df56aded70dc71e292314708713171244 yt/utilities/tests/test_cosmology.py
--- a/yt/utilities/tests/test_cosmology.py
+++ b/yt/utilities/tests/test_cosmology.py
@@ -14,7 +14,9 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
+import numpy as np
+
+from yt.testing import assert_rel_equal
 from yt.utilities.cosmology import \
      Cosmology
 

diff -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f -r cf09468df56aded70dc71e292314708713171244 yt/utilities/tests/test_fits_image.py
--- a/yt/utilities/tests/test_fits_image.py
+++ b/yt/utilities/tests/test_fits_image.py
@@ -15,7 +15,6 @@
 
 import tempfile
 import os
-import numpy as np
 import shutil
 from yt.testing import fake_random_ds, requires_module
 from yt.convenience import load
@@ -82,8 +81,8 @@
     temp_img = fid2.pop("temperature")
 
     # This already has some assertions in it, so we don't need to do anything
-    # with it other can just make one
-    fid_comb = FITSImageData.from_images([dens_img, temp_img])
+    # with it other than just make one
+    FITSImageData.from_images([dens_img, temp_img])
 
     cut = ds.cutting([0.1, 0.2, -0.9], [0.5, 0.42, 0.6])
     cut_frb = cut.to_frb((0.5, "unitary"), 128)

diff -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f -r cf09468df56aded70dc71e292314708713171244 yt/utilities/tests/test_flagging_methods.py
--- a/yt/utilities/tests/test_flagging_methods.py
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -1,4 +1,6 @@
-from yt.testing import *
+import numpy as np
+
+from yt.testing import fake_random_ds
 from yt.utilities.flagging_methods import flagging_method_registry
 
 def setup():

diff -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f -r cf09468df56aded70dc71e292314708713171244 yt/utilities/tests/test_interpolators.py
--- a/yt/utilities/tests/test_interpolators.py
+++ b/yt/utilities/tests/test_interpolators.py
@@ -1,7 +1,12 @@
-from yt.testing import *
+import numpy as np
+
+from yt.testing import \
+    assert_array_equal, \
+    assert_array_almost_equal, \
+    fake_random_ds
 import yt.utilities.linear_interpolators as lin
 from yt.utilities.lib.Interpolators import \
-        ghost_zone_interpolate
+    ghost_zone_interpolate
 
 def setup():
     pass

diff -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f -r cf09468df56aded70dc71e292314708713171244 yt/utilities/tests/test_kdtrees.py
--- a/yt/utilities/tests/test_kdtrees.py
+++ b/yt/utilities/tests/test_kdtrees.py
@@ -13,13 +13,13 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
+import numpy as np
 
+from yt.testing import \
+    assert_array_almost_equal, \
+    assert_array_equal
 from yt.utilities.spatial import cKDTree
 
-def setup():
-    pass
-
 
 def test_cython_tree():
     r"""This test makes sure that the cython kdtree is finding the correct

diff -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f -r cf09468df56aded70dc71e292314708713171244 yt/utilities/tests/test_particle_generator.py
--- a/yt/utilities/tests/test_particle_generator.py
+++ b/yt/utilities/tests/test_particle_generator.py
@@ -1,7 +1,11 @@
 import numpy as np
-from yt.mods import *
-from yt.testing import *
-from yt.utilities.particle_generator import *
+from yt.testing import \
+    assert_almost_equal, \
+    assert_equal
+from yt.utilities.particle_generator import \
+    WithDensityParticleGenerator, \
+    LatticeParticleGenerator, \
+    FromListParticleGenerator
 from yt.frontends.stream.api import load_uniform_grid, refine_amr
 import yt.utilities.initial_conditions as ic
 import yt.utilities.flagging_methods as fm

diff -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f -r cf09468df56aded70dc71e292314708713171244 yt/utilities/tests/test_periodic_table.py
--- a/yt/utilities/tests/test_periodic_table.py
+++ b/yt/utilities/tests/test_periodic_table.py
@@ -1,4 +1,4 @@
-from yt.testing import *
+from yt.testing import assert_equal
 from yt.utilities.periodic_table import _elements, periodic_table
 
 def test_element_accuracy():

diff -r c8d8bba22d1c2d0e6a83e11cd47b755409fe591f -r cf09468df56aded70dc71e292314708713171244 yt/utilities/tests/test_periodicity.py
--- a/yt/utilities/tests/test_periodicity.py
+++ b/yt/utilities/tests/test_periodicity.py
@@ -1,4 +1,8 @@
-from yt.testing import *
+import numpy as np
+
+from yt.testing import \
+    fake_random_ds, \
+    assert_almost_equal
 from yt.utilities.math_utils import euclidean_dist, periodic_dist
 
 def setup():


https://bitbucket.org/yt_analysis/yt/commits/21fea2d1c603/
Changeset:   21fea2d1c603
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 03:21:40+00:00
Summary:     Linting yt/utilities/grid_data_format/
Affected #:  6 files

diff -r cf09468df56aded70dc71e292314708713171244 -r 21fea2d1c603d024656849432b6d0bffd6067747 yt/utilities/grid_data_format/conversion/conversion_athena.py
--- a/yt/utilities/grid_data_format/conversion/conversion_athena.py
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -1,15 +1,9 @@
 from __future__ import print_function
 from __future__ import absolute_import
-import os
-import weakref
 import numpy as np
+from yt.utilities.grid_data_format.conversion.conversion_abc import Converter
 from yt.utilities.on_demand_imports import _h5py as h5
-from .conversion_abc import *
 from glob import glob
-from collections import \
-    defaultdict
-from stat import \
-    ST_CTIME
 
 translation_dict = {}
 translation_dict['density'] = 'density'
@@ -40,7 +34,7 @@
         self.handle = None
 
     def parse_line(self,line, grid):
-    #    print line
+        # print line
         # grid is a dictionary
         splitup = line.strip().split()
         if "vtk" in splitup:
@@ -83,7 +77,7 @@
         except:
             pass
         # print 'Writing %s' % name
-        if not name in g.keys(): 
+        if name not in g.keys():
             g.create_dataset(name,data=data)
         
 
@@ -111,7 +105,6 @@
             grid = {}
             grid['read_field'] = None
             grid['read_type'] = None
-            table_read=False
             line = f.readline()
             while grid['read_field'] is None:
                 self.parse_line(line, grid)
@@ -150,9 +143,9 @@
         g = f.create_group('gridded_data_format')
         g.attrs['format_version']=np.float32(1.0)
         g.attrs['data_software']='athena'
-        data_g = f.create_group('data')
-        field_g = f.create_group('field_types')
-        part_g = f.create_group('particle_types')
+        f.create_group('data')
+        f.create_group('field_types')
+        f.create_group('particle_types')
         pars_g = f.create_group('simulation_parameters')
 
 
@@ -161,24 +154,23 @@
         dle = np.min(gles,axis=0)
         dre = np.max(gles+grid_dims*grid_dds,axis=0)
         glis = ((gles - dle)/grid_dds).astype('int64')
-        gris = glis + gdims
 
         ddims = (dre-dle)/grid_dds[0]
 
         # grid_left_index
-        gli = f.create_dataset('grid_left_index',data=glis)
+        f.create_dataset('grid_left_index',data=glis)
         # grid_dimensions
-        gdim = f.create_dataset('grid_dimensions',data=gdims)
+        f.create_dataset('grid_dimensions',data=gdims)
 
         # grid_level
-        level = f.create_dataset('grid_level',data=grid_levels)
+        f.create_dataset('grid_level',data=grid_levels)
 
         ## ----------QUESTIONABLE NEXT LINE--------- ##
         # This data needs two dimensions for now. 
-        part_count = f.create_dataset('grid_particle_count',data=grid_particle_counts)
+        f.create_dataset('grid_particle_count',data=grid_particle_counts)
 
         # grid_parent_id
-        pids = f.create_dataset('grid_parent_id',data=grid_parent_ids)
+        f.create_dataset('grid_parent_id',data=grid_parent_ids)
 
         ## --------- Done with top level nodes --------- ##
 
@@ -318,7 +310,7 @@
 
 
     def parse_line(self, line, grid):
-    #    print line
+        #    print line
         # grid is a dictionary
         splitup = line.strip().split()
         if "vtk" in splitup:
@@ -401,34 +393,33 @@
         g.attrs['data_software']='athena'
         data_g = f.create_group('data')
         field_g = f.create_group('field_types')
-        part_g = f.create_group('particle_types')
+        f.create_group('particle_types')
         pars_g = f.create_group('simulation_parameters')
 
         dle = grid['left_edge'] # True only in this case of one grid for the domain
         gles = np.array([grid['left_edge']])
         gdims = np.array([grid['dimensions']])
         glis = ((gles - dle)/grid['dds']).astype('int64')
-        gris = glis + gdims
 
         # grid_left_index
-        gli = f.create_dataset('grid_left_index',data=glis)
+        f.create_dataset('grid_left_index',data=glis)
         # grid_dimensions
-        gdim = f.create_dataset('grid_dimensions',data=gdims)
+        f.create_dataset('grid_dimensions',data=gdims)
 
         levels = np.array([0]).astype('int64') # unigrid example
         # grid_level
-        level = f.create_dataset('grid_level',data=levels)
+        f.create_dataset('grid_level',data=levels)
 
         ## ----------QUESTIONABLE NEXT LINE--------- ##
         # This data needs two dimensions for now. 
         n_particles = np.array([[0]]).astype('int64')
         #grid_particle_count
-        part_count = f.create_dataset('grid_particle_count',data=n_particles)
+        f.create_dataset('grid_particle_count',data=n_particles)
 
         # Assume -1 means no parent.
         parent_ids = np.array([-1]).astype('int64')
         # grid_parent_id
-        pids = f.create_dataset('grid_parent_id',data=parent_ids)
+        f.create_dataset('grid_parent_id',data=parent_ids)
 
         ## --------- Done with top level nodes --------- ##
 
@@ -441,7 +432,7 @@
             name = field
             if field in translation_dict.keys():
                 name = translation_dict[name]
-            if not name in g0.keys(): 
+            if name not in g0.keys(): 
                 g0.create_dataset(name,data=grid[field])
 
         ## --------- Store Particle Data --------- ##

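The writer builds a small fixed HDF5 layout before filling in per-grid data;
a minimal sketch of just that skeleton using h5py directly (group names and
attributes taken from the hunk above, everything else illustrative):

    import numpy as np
    import h5py

    def write_gdf_skeleton(filename):
        with h5py.File(filename, "w") as f:
            g = f.create_group("gridded_data_format")
            g.attrs["format_version"] = np.float32(1.0)
            g.attrs["data_software"] = "athena"
            # Populated later with grid datasets, field metadata, etc.
            for name in ("data", "field_types",
                         "particle_types", "simulation_parameters"):
                f.create_group(name)
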
diff -r cf09468df56aded70dc71e292314708713171244 -r 21fea2d1c603d024656849432b6d0bffd6067747 yt/utilities/grid_data_format/conversion/setup.py
--- a/yt/utilities/grid_data_format/conversion/setup.py
+++ b/yt/utilities/grid_data_format/conversion/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r cf09468df56aded70dc71e292314708713171244 -r 21fea2d1c603d024656849432b6d0bffd6067747 yt/utilities/grid_data_format/scripts/convert_distributed_athena.py
--- a/yt/utilities/grid_data_format/scripts/convert_distributed_athena.py
+++ b/yt/utilities/grid_data_format/scripts/convert_distributed_athena.py
@@ -1,4 +1,4 @@
-from grid_data_format import *
+from grid_data_format import AthenaDistributedConverter
 import sys
 # Assumes that last input is the basename for the athena dataset.
 # i.e. kh_3d_mhd_hlld_128_beta5000_sub_tanhd.0030

diff -r cf09468df56aded70dc71e292314708713171244 -r 21fea2d1c603d024656849432b6d0bffd6067747 yt/utilities/grid_data_format/scripts/convert_single_athena.py
--- a/yt/utilities/grid_data_format/scripts/convert_single_athena.py
+++ b/yt/utilities/grid_data_format/scripts/convert_single_athena.py
@@ -1,4 +1,4 @@
-from grid_data_format import *
+from grid_data_format import AthenaConverter
 import sys
 # Assumes that last input is the basename for the athena dataset.
 # i.e. kh_3d_mhd_hlld_128_beta5000_sub_tanhd.0030

diff -r cf09468df56aded70dc71e292314708713171244 -r 21fea2d1c603d024656849432b6d0bffd6067747 yt/utilities/grid_data_format/setup.py
--- a/yt/utilities/grid_data_format/setup.py
+++ b/yt/utilities/grid_data_format/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r cf09468df56aded70dc71e292314708713171244 -r 21fea2d1c603d024656849432b6d0bffd6067747 yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -341,7 +341,7 @@
         grid_group = g.create_group("grid_%010i" % (grid.id - grid._id_offset))
         # add group for the particles on this grid
         particles_group = grid_group.create_group("particles")
-        pt_group = particles_group.create_group(particle_type_name)
+        particles_group.create_group(particle_type_name)
 
     yield f
     


https://bitbucket.org/yt_analysis/yt/commits/0fb9b1e94124/
Changeset:   0fb9b1e94124
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 03:22:16+00:00
Summary:     Linting yt/utilities/parallel_tools/
Affected #:  4 files

diff -r 21fea2d1c603d024656849432b6d0bffd6067747 -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b yt/utilities/parallel_tools/controller_system.py
--- a/yt/utilities/parallel_tools/controller_system.py
+++ b/yt/utilities/parallel_tools/controller_system.py
@@ -12,15 +12,10 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-    
-try:
-    from .parallel_analysis_interface import MPI
-except ImportError:
-    pass
+
 from .parallel_analysis_interface import \
     ProcessorPool
-from contextmanager import contextlib
-from abc import ABCMeta, abstractmethod, abstractproperty
+from abc import abstractmethod
 
 class WorkSplitter(object):
     def __init__(self, controller, group1, group2):

diff -r 21fea2d1c603d024656849432b6d0bffd6067747 -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b yt/utilities/parallel_tools/io_runner.py
--- a/yt/utilities/parallel_tools/io_runner.py
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -13,7 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import os
 import numpy as np
 from yt.utilities.logger import ytLogger as mylog
 from .parallel_analysis_interface import \

diff -r 21fea2d1c603d024656849432b6d0bffd6067747 -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -124,7 +124,7 @@
     ytcfg["yt","__global_parallel_size"] = str(communicator.size)
     ytcfg["yt","__parallel"] = "True"
     if exe_name == "embed_enzo" or \
-        ("_parallel" in dir(sys) and sys._parallel == True):
+        ("_parallel" in dir(sys) and sys._parallel is True):
         ytcfg["yt","inline"] = "True"
     if communicator.rank > 0:
         if ytcfg.getboolean("yt","LogFile"):
@@ -270,7 +270,7 @@
             if attrname.startswith("_") or attrname in skip:
                 if attrname not in extra: continue
             attr = getattr(cls, attrname)
-            if type(attr) == types.MethodType:
+            if isinstance(attr, types.MethodType):
                 setattr(cls, attrname, parallel_simple_proxy(attr))
 
 def parallel_passthrough(func):
@@ -323,7 +323,7 @@
             try:
                 rv = func(*args, **kwargs)
                 all_clear = 1
-            except Exception as ex:
+            except Exception:
                 traceback.print_last()
                 all_clear = 0
         else:
@@ -918,7 +918,7 @@
 
     def get_filename(self, prefix, rank=None):
         if not self._distributed: return prefix
-        if rank == None:
+        if rank is None:
             return "%s_%04i" % (prefix, self.comm.rank)
         else:
             return "%s_%04i" % (prefix, rank)
@@ -1247,7 +1247,7 @@
                 if f in xyzfactors[nextdim]:
                     cuts.append([nextdim, f])
                     topop = xyzfactors[nextdim].index(f)
-                    temp = xyzfactors[nextdim].pop(topop)
+                    xyzfactors[nextdim].pop(topop)
                     lastdim = nextdim
                     break
                 nextdim = (nextdim + 1) % 3

diff -r 21fea2d1c603d024656849432b6d0bffd6067747 -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b yt/utilities/parallel_tools/task_queue.py
--- a/yt/utilities/parallel_tools/task_queue.py
+++ b/yt/utilities/parallel_tools/task_queue.py
@@ -14,9 +14,8 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-import time, threading, random
 
-from yt.funcs import *
+from yt.funcs import mylog
 from .parallel_analysis_interface import \
     communication_system, \
     _get_comm, \


https://bitbucket.org/yt_analysis/yt/commits/a32b5e2a781f/
Changeset:   a32b5e2a781f
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 03:22:40+00:00
Summary:     Linting yt/utilities/
Affected #:  29 files

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/amr_kdtree/amr_kdtools.py
--- a/yt/utilities/amr_kdtree/amr_kdtools.py
+++ b/yt/utilities/amr_kdtree/amr_kdtools.py
@@ -13,7 +13,7 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-from yt.funcs import *
+from yt.funcs import mylog
 
 
 def receive_and_reduce(comm, incoming_rank, image, add_to_front):

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -14,18 +14,28 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.funcs import *
 import numpy as np
+
+from yt.funcs import mylog
 from yt.utilities.on_demand_imports import _h5py as h5py
-from .amr_kdtools import \
-        receive_and_reduce, send_to_parent, scatter_image
-
-from yt.utilities.lib.amr_kdtools import Node, add_pygrids, find_node, \
-        kd_is_leaf, depth_traverse, depth_first_touch, viewpoint_traverse, \
-        kd_traverse, \
-        get_left_edge, get_right_edge, kd_sum_volume, kd_node_check
-from yt.utilities.parallel_tools.parallel_analysis_interface \
-    import ParallelAnalysisInterface 
+from yt.utilities.amr_kdtree.amr_kdtools import \
+    receive_and_reduce, \
+    send_to_parent, \
+    scatter_image
+from yt.utilities.lib.amr_kdtools import \
+    Node, \
+    add_pygrids, \
+    find_node, \
+    kd_is_leaf, \
+    depth_traverse, \
+    depth_first_touch, \
+    kd_traverse, \
+    get_left_edge, \
+    get_right_edge, \
+    kd_sum_volume, \
+    kd_node_check
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    ParallelAnalysisInterface
 from yt.utilities.lib.grid_traversal import PartitionedGrid
 from yt.utilities.math_utils import periodic_position
 
@@ -342,18 +352,18 @@
         new_positions = [periodic_position(p, self.ds) for p in new_positions]
         grids[in_grid] = grid
                 
-        get_them = np.argwhere(in_grid != True).ravel()
+        get_them = np.argwhere(np.logical_not(in_grid)).ravel()
         cis[in_grid] = new_cis[in_grid]
 
-        if (in_grid != True).sum()>0:
-            grids[in_grid != True] = \
+        if (in_grid).sum()>0:
+            grids[np.logical_not(in_grid)] = \
                 [self.ds.index.grids[self.locate_brick(new_positions[i]).grid -
                                  self._id_offset]
                  for i in get_them]
-            cis[in_grid != True] = \
+            cis[np.logical_not(in_grid)] = \
                 [(new_positions[i]-grids[i].LeftEdge)/
                  grids[i].dds for i in get_them]
-        cis = [tuple(ci) for ci in cis]
+        cis = [tuple(_ci) for _ci in cis]
         return grids, cis
 
     def locate_neighbors_from_position(self, position):
@@ -457,7 +467,7 @@
         splitdims = self.comm.par_combine_object(splitdims, 'cat', 'list') 
         splitposs = self.comm.par_combine_object(splitposs, 'cat', 'list') 
         nid = np.array(nid)
-        new_tree = self.rebuild_tree_from_array(nid, pid, lid, 
+        self.rebuild_tree_from_array(nid, pid, lid,
             rid, les, res, gid, splitdims, splitposs)
 
     def get_node_arrays(self):
@@ -536,9 +546,9 @@
         return self.tree.sum_cells() 
 
 if __name__ == "__main__":
-    from yt.mods import *
+    import yt
     from time import time
-    ds = load('/Users/skillman/simulations/DD1717/DD1717')
+    ds = yt.load('/Users/skillman/simulations/DD1717/DD1717')
     ds.index
 
     t1 = time()

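The in_grid bookkeeping above depends on ordinary boolean-mask semantics: the
points needing relocation are the ones outside the grid, so the complement
has to be spelled out explicitly. A standalone illustration:

    import numpy as np

    in_grid = np.array([True, False, True, False])

    np.argwhere(in_grid).ravel()                  # array([0, 2]): inside
    np.argwhere(np.logical_not(in_grid)).ravel()  # array([1, 3]): outside
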
diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/answer_testing/level_sets_tests.py
--- a/yt/utilities/answer_testing/level_sets_tests.py
+++ b/yt/utilities/answer_testing/level_sets_tests.py
@@ -15,10 +15,10 @@
 
 import numpy as np
 
-from yt.testing import *
-
-from .framework import \
-    AnswerTestingTest, requires_ds, data_dir_load
+from yt.testing import \
+    assert_equal
+from yt.utilities.answer_testing.framework import \
+    AnswerTestingTest
 
 class ExtractConnectedSetsTest(AnswerTestingTest):
     _type_name = "ExtractConnectedSets"

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/chemical_formulas.py
--- a/yt/utilities/chemical_formulas.py
+++ b/yt/utilities/chemical_formulas.py
@@ -13,8 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-import string
 import re
 from .periodic_table import periodic_table
 

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -15,16 +15,39 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import argparse
+import base64
+import getpass
+import numpy as np
+import os
+import sys
+import subprocess
+import tempfile
+import json
+import pprint
+
+
 from yt.config import ytcfg
 ytcfg["yt","__command_line"] = "True"
 from yt.startup_tasks import parser, subparsers
-from yt.mods import *
-from yt.funcs import *
+from yt.funcs import \
+    ensure_list, \
+    get_hg_version, \
+    mylog, \
+    ensure_dir_exists, \
+    update_hg, \
+    enable_plugins
 from yt.extern.six import add_metaclass
 from yt.extern.six.moves import urllib
-from yt.utilities.minimal_representation import MinimalProjectDescription
-import argparse, os, os.path, math, sys, time, subprocess, getpass, tempfile
-import base64, os
+from yt.convenience import load
+from yt.visualization.plot_window import \
+    SlicePlot, \
+    ProjectionPlot
+
+# loading field plugins for backward compatibility, since this module
+# used to do "from yt.mods import *"
+if ytcfg.getboolean("yt","loadfieldplugins"):
+    enable_plugins()
 
 def _fix_ds(arg):
     if os.path.isdir("%s" % arg) and \
@@ -419,11 +442,11 @@
         if "EDITOR" in os.environ:
             print()
             print("Press enter to spawn your editor, %s" % os.environ["EDITOR"])
-            loki = raw_input()
+            raw_input()
             tf = tempfile.NamedTemporaryFile(delete=False)
             fn = tf.name
             tf.close()
-            popen = subprocess.call("$EDITOR %s" % fn, shell = True)
+            subprocess.call("$EDITOR %s" % fn, shell = True)
             content = open(fn).read()
             try:
                 os.unlink(fn)
@@ -464,7 +487,7 @@
         print("'submit'.  Next we'll ask for your Bitbucket Username.")
         print("If you don't have one, run the 'yt bootstrap_dev' command.")
         print()
-        loki = raw_input()
+        raw_input()
         retval = bb_apicall(endpoint, data, use_pass=True)
         import json
         retval = json.loads(retval)
@@ -536,7 +559,7 @@
         print("Okay, press enter to register.  You should receive a welcome")
         print("message at %s when this is complete." % email)
         print()
-        loki = raw_input()
+        raw_input()
         data = dict(name = name, email = email, username = username,
                     password = password1, password2 = password2,
                     url = url, zap = "rowsdower")
@@ -544,7 +567,7 @@
         hub_url = "https://hub.yt-project.org/create_user"
         req = urllib.request.Request(hub_url, data)
         try:
-            status = urllib.request.urlopen(req).read()
+            urllib.request.urlopen(req).read()
         except urllib.error.HTTPError as exc:
             if exc.code == 400:
                 print("Sorry, the Hub couldn't create your user.")
@@ -585,14 +608,12 @@
         print()
         print("yt module located at:")
         print("    %s" % (path))
-        update_supp = False
         if "YT_DEST" in os.environ:
             spath = os.path.join(
                      os.environ["YT_DEST"], "src", "yt-supplemental")
             if os.path.isdir(spath):
                 print("The supplemental repositories are located at:")
                 print("    %s" % (spath))
-                update_supp = True
         vstring = get_yt_version()
         if vstring == -1:
             vstring = "unknown"
@@ -693,7 +714,7 @@
         else:
             p = SlicePlot(ds, args.axis, args.field)
         from yt.visualization.mapserver.pannable_map import PannableMapServer
-        mapper = PannableMapServer(p.data_source, args.field)
+        PannableMapServer(p.data_source, args.field)
         import yt.extern.bottle as bottle
         bottle.debug(True)
         bottle_dir = os.path.dirname(bottle.__file__)
@@ -970,11 +991,11 @@
         ds.print_stats()
         vals = {}
         if args.field in ds.derived_field_list:
-            if args.max == True:
+            if args.max is True:
                 vals['min'] = ds.find_max(args.field)
                 print("Maximum %s: %0.5e at %s" % (args.field,
                     vals['min'][0], vals['min'][1]))
-            if args.min == True:
+            if args.min is True:
                 vals['max'] = ds.find_min(args.field)
                 print("Minimum %s: %0.5e at %s" % (args.field,
                     vals['max'][0], vals['max'][1]))
@@ -1006,14 +1027,12 @@
         print()
         print("yt module located at:")
         print("    %s" % (path))
-        update_supp = False
         if "YT_DEST" in os.environ:
             spath = os.path.join(
                      os.environ["YT_DEST"], "src", "yt-supplemental")
             if os.path.isdir(spath):
                 print("The supplemental repositories are located at:")
                 print("    %s" % (spath))
-                update_supp = True
         vstring = None
         if "site-packages" not in path:
             vstring = get_hg_version(path)
@@ -1052,7 +1071,6 @@
         if not filename.endswith(".png"):
             print("File must be a PNG file!")
             return 1
-        import base64, json, pprint
         image_data = base64.b64encode(open(filename, 'rb').read())
         api_key = 'f62d550859558f28c4c214136bc797c7'
         parameters = {'key':api_key, 'image':image_data, type:'base64',

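A portability note on the raw_input() calls retained in this hunk: raw_input
is the Python 2 spelling, and the six shim already vendored in yt offers one
that works on both interpreters (a sketch, not what the commit does):

    from yt.extern.six.moves import input  # raw_input on py2, input on py3

    input()  # blocks until the user presses enter, as raw_input() did
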
diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -166,7 +166,7 @@
                      (self.comoving_transverse_distance(z_i, z_f) /
                       self.hubble_distance() * 
                       np.sqrt(1 + self.omega_curvature * 
-                           sqr(self.comoving_transverse_distance(z_i, z_f) /
+                           np.square(self.comoving_transverse_distance(z_i, z_f) /
                                self.hubble_distance())) - 
                       np.sinh(np.fabs(self.omega_curvature) * 
                             self.comoving_transverse_distance(z_i, z_f) /
@@ -178,7 +178,7 @@
                      (self.comoving_transverse_distance(z_i, z_f) /
                       self.hubble_distance() * 
                       np.sqrt(1 + self.omega_curvature * 
-                           sqr(self.comoving_transverse_distance(z_i, z_f) /
+                           np.square(self.comoving_transverse_distance(z_i, z_f) /
                                self.hubble_distance())) - 
                       np.arcsin(np.fabs(self.omega_curvature) * 
                            self.comoving_transverse_distance(z_i, z_f) /

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/definitions.py
--- a/yt/utilities/definitions.py
+++ b/yt/utilities/definitions.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from .physical_constants import \
+from .physical_ratios import \
    mpc_per_mpc, kpc_per_mpc, pc_per_mpc, au_per_mpc, rsun_per_mpc, \
    miles_per_mpc, km_per_mpc, cm_per_mpc, sec_per_Gyr, sec_per_Myr, \
    sec_per_year, sec_per_day

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -416,9 +416,6 @@
             self.ma)
         return v
 
-class YTEmptyProfileData(Exception):
-    pass
-
 class YTTooParallel(YTException):
     def __str__(self):
         return "You've used too many processors for this dataset."

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -440,7 +440,7 @@
             nx, ny = image_res
         else:
             nx, ny = image_res, image_res
-    dx, dy = width[0]/nx, width[1]/ny
+    dx = width[0]/nx
     crpix = [0.5*(nx+1), 0.5*(ny+1)]
     if hasattr(ds, "wcs") and not iterable(axis):
         # This is a FITS dataset, so we use it to construct the WCS

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -143,12 +143,11 @@
     def find_by_second_derivative(self):
         max_strength = 0
         max_axis = -1
-        max_ind = -1
         for dim in range(3):
             sig = self.sigs[dim]
             sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
             center = int((self.flagged.shape[dim] - 1) / 2)
-            strength = zero_strength = 0
+            strength = zero_strength = zero_cross = 0
             for i in range(1, sig.size-2):
                 # Note that sd is offset by one
                 if sd[i-1] * sd[i] < 0:
@@ -162,7 +161,6 @@
                         zero_cross = i
             if zero_strength > max_strength:
                 max_axis = dim
-                max_ind = zero_cross
         dims = self.dimensions.copy()
         li = self.left_index.copy()
         dims[max_axis] = zero_cross

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/fortran_utils.py
--- a/yt/utilities/fortran_utils.py
+++ b/yt/utilities/fortran_utils.py
@@ -290,7 +290,6 @@
     net_format += "I"
     size = struct.calcsize(net_format)
     vals = list(struct.unpack(net_format, f.read(size)))
-    vvv = vals[:]
     s1, s2 = vals.pop(0), vals.pop(-1)
     if s1 != s2:
         print("S1 = %s ; S2 = %s ; SIZE = %s")

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/initial_conditions.py
--- a/yt/utilities/initial_conditions.py
+++ b/yt/utilities/initial_conditions.py
@@ -14,7 +14,6 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-from yt.units.yt_array import YTQuantity
 
 class FluidOperator(object):
     def apply(self, ds):

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -16,8 +16,6 @@
 from collections import defaultdict
 from contextlib import contextmanager
 
-from yt.funcs import mylog
-from yt.extern.six.moves import cPickle
 import os
 from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -16,7 +16,7 @@
 
 import numpy as np
 
-from yt.funcs import *
+from yt.funcs import mylog
 import yt.utilities.lib.Interpolators as lib
 
 class UnilinearFieldInterpolator:

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -129,8 +129,8 @@
     c = np.empty((2,) + a.shape, dtype="float64")
     c[0,:] = np.abs(a - b)
     
-    p_directions = [i for i,p in enumerate(periodicity) if p == True]
-    np_directions = [i for i,p in enumerate(periodicity) if p == False]
+    p_directions = [i for i,p in enumerate(periodicity) if p is True]
+    np_directions = [i for i,p in enumerate(periodicity) if p is False]
     for d in p_directions:
         c[1,d,:] = period[d,:] - np.abs(a - b)[d,:]
     for d in np_directions:

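One caveat with this class of lint fix: `is True` matches only the Python
bool singleton, so it is not equivalent to `== True` if the periodicity flags
ever arrive as numpy bools. Standalone illustration:

    import numpy as np

    np.bool_(True) == True   # True
    np.bool_(True) is True   # False: numpy bools are not the singleton
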
diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -165,7 +165,7 @@
         metadata['obj_type'] = self.type
         with h5.File(storage) as h5f:
             dset = str(uuid4())[:8]
-            grp = h5f.create_group(dset)
+            h5f.create_group(dset)
             _serialize_to_h5(h5f[dset], metadata)
             if len(chunks) > 0:
                 g = h5f[dset].create_group('chunks')
@@ -223,7 +223,6 @@
         uploader_info = json.loads(rv)
         new_url = url + "/handler/%s" % uploader_info['handler_uuid']
         for i, (cn, cv) in enumerate(chunks):
-            remaining = cv.size * cv.itemsize
             f = TemporaryFile()
             np.save(f, cv)
             f.seek(0)

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/on_demand_imports.py
--- a/yt/utilities/on_demand_imports.py
+++ b/yt/utilities/on_demand_imports.py
@@ -265,11 +265,10 @@
     def version(self):
         if self._version is None:
             try:
-                import h5py.version as File
+                import h5py.version as version
             except ImportError:
                 version = NotAModule(self._name)
-            self._version = File
+            self._version = version
         return self._version
 
 _h5py = h5py_imports()
-

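The class touched here implements yt's on-demand import pattern: a property
resolves the real module on first access and caches it. A simplified
standalone sketch (the fallback placeholder is yt's NotAModule; None stands
in for it here):

    class lazy_h5py(object):
        _version = None

        @property
        def version(self):
            if self._version is None:
                try:
                    import h5py.version as version
                except ImportError:
                    version = None  # yt substitutes a NotAModule placeholder
                self._version = version
            return self._version
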
diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/operator_registry.py
--- a/yt/utilities/operator_registry.py
+++ b/yt/utilities/operator_registry.py
@@ -14,7 +14,6 @@
 #-----------------------------------------------------------------------------
 
 import copy
-import types
 
 class OperatorRegistry(dict):
     def find(self, op, *args, **kwargs):

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -16,8 +16,8 @@
 
 import numpy as np
 
-from yt.funcs import *
-from yt.utilities.math_utils import get_rotation_matrix
+from yt.funcs import mylog
+from yt.units.yt_array import YTArray
 
 class Orientation:
     def __init__(self, normal_vector, north_vector=None, steady_north=False):

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/parameter_file_storage.py
--- a/yt/utilities/parameter_file_storage.py
+++ b/yt/utilities/parameter_file_storage.py
@@ -18,7 +18,7 @@
 from itertools import islice
 
 from yt.config import ytcfg
-from yt.funcs import *
+from yt.funcs import mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_simple_proxy
 
@@ -66,7 +66,7 @@
         Otherwise, use read-only settings.
 
         """
-        if self._register == False: return
+        if self._register is False: return
         if ytcfg.getboolean("yt", "StoreParameterFiles"):
             self._read_only = False
             self.init_db()

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/particle_generator.py
--- a/yt/utilities/particle_generator.py
+++ b/yt/utilities/particle_generator.py
@@ -1,7 +1,7 @@
 import numpy as np
 from yt.utilities.lib.CICDeposit import CICSample_3
-from yt.funcs import *
-from yt.units.yt_array import uconcatenate, YTArray
+from yt.funcs import get_pbar
+from yt.units.yt_array import uconcatenate
 from yt.extern.six import string_types
 
 class ParticleGenerator(object):

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/performance_counters.py
--- a/yt/utilities/performance_counters.py
+++ b/yt/utilities/performance_counters.py
@@ -13,12 +13,16 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import atexit
+import time
+
+from bisect import insort
+from collections import defaultdict
+from datetime import datetime as dt
+from functools import wraps
+
 from yt.config import ytcfg
-from yt.funcs import *
-import time
-from datetime import datetime as dt
-from bisect import insort
-import atexit
+from yt.funcs import mylog
 
 class PerformanceCounters(object):
     _shared_state = {}

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/periodic_table.py
--- a/yt/utilities/periodic_table.py
+++ b/yt/utilities/periodic_table.py
@@ -15,7 +15,6 @@
 
 import numpy as np
 import numbers
-import types
 
 _elements = (
     (1, 1.0079400000, "Hydrogen", "H"),

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -1,6 +1,27 @@
-from yt.utilities.physical_ratios import *
+from math import pi
+
+from yt.utilities.physical_ratios import \
+    mass_electron_grams, \
+    amu_grams, \
+    mass_hydrogen_grams, \
+    speed_of_light_cm_per_s, \
+    boltzmann_constant_erg_per_K, \
+    mass_sun_grams, \
+    mass_jupiter_grams, \
+    mass_mercury_grams, \
+    mass_venus_grams, \
+    mass_earth_grams, \
+    mass_mars_grams, \
+    mass_saturn_grams, \
+    mass_uranus_grams, \
+    mass_neptune_grams, \
+    planck_mass_grams, \
+    planck_length_cm, \
+    planck_time_s, \
+    planck_energy_erg, \
+    planck_charge_esu, \
+    planck_temperature_K
 from yt.units.yt_array import YTQuantity
-from math import pi
 
 mass_electron_cgs = YTQuantity(mass_electron_grams, 'g')
 mass_electron = mass_electron_cgs

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/rpdb.py
--- a/yt/utilities/rpdb.py
+++ b/yt/utilities/rpdb.py
@@ -14,7 +14,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import cmd, pdb, socket, sys
+import cmd
+import pdb
+import socket
+import sys
 from yt.extern.six.moves import StringIO
 import traceback
 import signal

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -1,6 +1,5 @@
 from __future__ import print_function
 from yt.extern.six.moves import cStringIO
-import re
 import os
 import numpy as np
 try:
@@ -387,8 +386,6 @@
         assert 'struct' in line
 
         str_types = []
-        comments = []
-        str_lines = []
         l = ascfile.readline()
         while "}" not in l:
             vtype, vnames = _get_struct_vars(l)
@@ -1036,10 +1033,7 @@
         #right = right.astype('float32')
 
         #my_filter = bbox_filter(left, right, self.true_domain_width)
-        data = []
-        for dd in self.filter_bbox(
-            left, right,
-            self.iter_data(inds, fields)):
+        for dd in self.filter_bbox(left, right, self.iter_data(inds, fields)):
             yield dd
         #for dd in self.filter_particles(
         #    self.iter_data(inds, fields),
@@ -1066,11 +1060,8 @@
         return self.iter_data(inds, fields)
 
     def get_contiguous_chunk(self, left_key, right_key, fields):
-        liarr = self.get_ind_from_key(left_key)
-        riarr = self.get_ind_from_key(right_key)
 
         lbase=0
-        llen = 0
         if left_key > self._max_key:
             raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % \
                                (left_key, self._max_key))
@@ -1080,7 +1071,6 @@
         right_key = self.get_previous_nonzero_chunk(right_key, left_key)
 
         lbase = self.indexdata['base'][left_key]
-        llen = self.indexdata['len'][left_key]
 
         rbase = self.indexdata['base'][right_key]
         rlen = self.indexdata['len'][right_key]
@@ -1247,8 +1237,7 @@
 
         pbox[1, 0] = bbox[1, 1]
         pbox[1, 1] = pbox[1, 0] + pad[1]
-        for dd in self.filter_bbox(
-            filter_left, filter_right,
+        for dd in self.filter_bbox(filter_left, filter_right,
             self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
             yield dd
             del dd
@@ -1284,9 +1273,6 @@
 
         """
         _ensure_xyz_fields(fields)
-        bbox = self.get_cell_bbox(level, cell_iarr)
-        filter_left = bbox[:, 0] - pad
-        filter_right = bbox[:, 1] + pad
 
         data = []
         for dd in self.iter_padded_bbox_data(level, cell_iarr, pad, fields):
@@ -1304,8 +1290,6 @@
 
         """
         bbox = self.get_cell_bbox(level, cell_iarr)
-        filter_left = bbox[:, 0] - pad
-        filter_right = bbox[:, 1] + pad
 
         # Need to get all of these
         low_key, high_key = self.get_key_bounds(level, cell_iarr)

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -1,152 +1,4 @@
 #!/usr/bin/env python
-from __future__ import print_function
-import os
-import sys
-import os.path
-import glob
-import platform
-
-
-# snatched from PyTables
-def add_from_path(envname, dirs):
-    try:
-        dirs.extend(os.environ[envname].split(os.pathsep))
-    except KeyError:
-        pass
-
-
-# snatched from PyTables
-def add_from_flags(envname, flag_key, dirs):
-    for flag in os.environ.get(envname, "").split():
-        if flag.startswith(flag_key):
-            dirs.append(flag[len(flag_key):])
-
-
-# snatched from PyTables
-def get_default_dirs():
-    default_header_dirs = []
-    default_library_dirs = []
-
-    add_from_path("CPATH", default_header_dirs)
-    add_from_path("C_INCLUDE_PATH", default_header_dirs)
-    add_from_flags("CPPFLAGS", "-I", default_header_dirs)
-    default_header_dirs.extend(
-        ['/usr/include', '/usr/local/include', '/usr/X11']
-    )
-
-    _archs = ['lib64', 'lib']
-    if platform.system() == 'Linux':
-        distname, version, did = platform.linux_distribution()
-        if distname.lower() in ('ubuntu', 'debian'):
-            _archs.extend(
-                ['lib/x86_64-linux-gnu',
-                 'lib/i686-linux-gnu',
-                 'lib/i386-linux-gnu']
-            )
-
-    add_from_flags("LDFLAGS", "-L", default_library_dirs)
-    default_library_dirs.extend(
-        os.path.join(_tree, _arch)
-        for _tree in ('/usr', '/usr/local', '/usr/X11', '/')
-        for _arch in _archs
-    )
-    return default_header_dirs, default_library_dirs
-
-
-def get_location_from_env(env):
-    env_dir = os.environ[env]
-    env_inc = os.path.join(env_dir, "include")
-    env_lib = os.path.join(env_dir, "lib")
-    print("%s_LOCATION: %s: %s, %s"
-          % (env.split('_')[0], env, env_inc, env_lib))
-    return (env_inc, env_lib)
-
-
-def get_location_from_cfg(cfg):
-    cfg_dir = open(cfg).read().strip()
-    cfg_inc = os.path.join(cfg_dir, "include")
-    cfg_lib = os.path.join(cfg_dir, "lib")
-    print("%s_LOCATION: %s: %s, %s"
-          % (cfg.split('.')[0].upper(), cfg, cfg_inc, cfg_lib))
-    return (cfg_inc, cfg_lib)
-
-
-def check_prefix(inc_dir, lib_dir):
-    if platform.system() == 'Linux':
-        distname, version, did = platform.linux_distribution()
-        if distname.lower() in ('ubuntu', 'debian'):
-            print("Since you are using multiarch distro it's hard to detect")
-            print("whether library matches the header file. We will assume")
-            print("it does. If you encounter any build failures please use")
-            print("proper cfg files to provide path to the dependencies")
-            print("")
-            return (inc_dir, lib_dir)
-    prefix = os.path.commonprefix([inc_dir, lib_dir]).rstrip('/\\')
-    if prefix is not '' and prefix == os.path.dirname(inc_dir):
-        return (inc_dir, lib_dir)
-    else:
-        print("It seems that include prefix is different from lib prefix")
-        print("Please use either env variable or cfg to set proper path")
-        return (None, None)
-
-
-def get_location_from_ctypes(header, library):
-    yt_inst = os.environ.get('YT_DEST')
-    if yt_inst is not None:
-        # since we prefer installation via script, make sure
-        # that YT_DEST path take precedence above all else
-        return (os.path.join(yt_inst, 'include'), os.path.join(yt_inst, 'lib'))
-
-    try:
-        import ctypes
-        import ctypes.util
-    except ImportError:
-        return (None, None)
-
-    target_inc, target_libdir = None, None
-    default_header_dirs, default_library_dirs = get_default_dirs()
-    for inc_prefix in default_header_dirs:
-        if os.path.isfile(os.path.join(inc_prefix, header)):
-            target_inc = inc_prefix
-
-    target_libfile = ctypes.util.find_library(library)
-    if None in (target_inc, target_libfile):
-        # either header or lib was not found, abort now
-        return (None, None)
-    if os.path.isfile(target_libfile):
-        return check_prefix(target_inc, os.path.dirname(target_libfile))
-    for lib_dir in default_library_dirs:
-        try:
-            ctypes.CDLL(os.path.join(lib_dir, target_libfile))
-            target_libdir = lib_dir
-        except OSError:
-            pass
-    return check_prefix(target_inc, target_libdir)
-
-
-def check_for_dependencies(env, cfg, header, library):
-    # First up: check in environment
-    if env in os.environ:
-        return get_location_from_env(env)
-    # Next up, we try config file
-    elif os.path.exists(cfg):
-        return get_location_from_cfg(cfg)
-    # Now we see if ctypes can help us
-    if os.name == 'posix' or os.name == 'nt':
-        target_inc, target_lib = get_location_from_ctypes(header, library)
-    if None not in (target_inc, target_lib):
-        print(
-            "%s_LOCATION: %s found via ctypes in: %s, %s"
-            % (env.split('_')[0], env.split('_')[0], target_inc, target_lib)
-        )
-        return (target_inc, target_lib)
-
-    print("Reading %s location from %s failed." % (env.split('_')[0], cfg))
-    print("Please place the base directory of your")
-    print("%s install in %s and restart." % (env.split('_')[0], cfg))
-    print("(ex: \"echo '/usr/local/' > %s\" )" % cfg)
-    print("You can locate the path by looking for %s" % header)
-    sys.exit(1)
 
 
 def configuration(parent_package='', top_path=None):

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/spatial/kdtree.py
--- a/yt/utilities/spatial/kdtree.py
+++ b/yt/utilities/spatial/kdtree.py
@@ -797,6 +797,7 @@
         result : dok_matrix
             Sparse matrix representing the results in "dictionary of keys" format.
         """
+        from yt.utilities.on_demand_imports import _scipy as scipy
         result = scipy.sparse.dok_matrix((self.n,other.n))
 
         def traverse(node1, rect1, node2, rect2):

diff -r 0fb9b1e9412420ad837aec6dd1f4030d06e3640b -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 yt/utilities/spatial/setupscons.py
--- a/yt/utilities/spatial/setupscons.py
+++ b/yt/utilities/spatial/setupscons.py
@@ -1,7 +1,5 @@
 #!/usr/bin/env python
 
-from os.path import join
-
 def configuration(parent_package = '', top_path = None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('spatial', parent_package, top_path)


https://bitbucket.org/yt_analysis/yt/commits/474d4b7d547e/
Changeset:   474d4b7d547e
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 03:23:58+00:00
Summary:     Removing remaining references to BinnedProfile[1-3]D
Affected #:  4 files

diff -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -121,7 +121,6 @@
     derived_field
 
 from yt.data_objects.api import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     DatasetSeries, ImageArray, \
     particle_filter, add_particle_filter, \
     create_profile, Profile1D, Profile2D, Profile3D, \

diff -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -27,11 +27,6 @@
     particle_handler_registry
 
 from .profiles import \
-    YTEmptyProfileData, \
-    BinnedProfile, \
-    BinnedProfile1D, \
-    BinnedProfile2D, \
-    BinnedProfile3D, \
     create_profile, \
     Profile1D, \
     Profile2D, \

diff -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 yt/frontends/stream/tests/test_update_data.py
--- a/yt/frontends/stream/tests/test_update_data.py
+++ b/yt/frontends/stream/tests/test_update_data.py
@@ -1,5 +1,5 @@
 from yt.testing import fake_random_ds
-from yt.data_objects.profiles import BinnedProfile1D
+from yt.data_objects.profiles import create_profile
 from numpy.random import uniform
 
 def test_update_data() :
@@ -12,9 +12,5 @@
     prj = ds.proj("temperature", 2)
     prj["temperature"]
     dd = ds.all_data()
-    profile = BinnedProfile1D(dd, 10, "density",
-                              dd["density"].min(),
-                              dd["density"].max())
-    profile.add_fields(["temperature"])
-    profile["temperature"]
-                              
+    profile = create_profile(dd, "density", "temperature", 10)
+    profile["temperature"]                             

diff -r a32b5e2a781fe9007f2ca83b03aebb78987fdd62 -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -15,7 +15,7 @@
 #-----------------------------------------------------------------------------
 
 from yt.funcs import mylog
-from yt.data_objects.profiles import BinnedProfile1D
+from yt.data_objects.profiles import create_profile
 from yt.visualization.volume_rendering.api import ColorTransferFunction
 from yt.visualization._mpl_imports import FigureCanvasAgg
 from matplotlib.figure import Figure
@@ -205,10 +205,10 @@
     def setup_profile(self, profile_field=None, profile_weight=None):
         if profile_field is None:
             profile_field = 'cell_volume'
-        prof = BinnedProfile1D(self.ds.all_data(), 128, self.field,
-                               self.bounds[0], self.bounds[1],
-                               log_space=self.log,
-                               end_collect=False)
+        prof = create_profile(self.ds.all_data(), self.field, profile_field,
+                              n_bins=128, extrema={profile_field: self.bounds},
+                              weight_field=profile_weight,
+                              log_space=self.log)
         prof.add_fields([profile_field], fractional=False,
                         weight=profile_weight)
         self.profiles[self.field] = prof

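For anyone migrating their own scripts, here is a minimal usage sketch of the
replacement API, mirroring the updated test above; fake_random_ds and the
explicit field tuple are assumptions made for this sketch so that a
"temperature" field exists:

    from yt.testing import fake_random_ds
    from yt.data_objects.profiles import create_profile

    ds = fake_random_ds(16, fields=("density", "temperature"))
    dd = ds.all_data()

    # One call replaces the old BinnedProfile1D(...) plus add_fields()
    # two-step: bin "temperature" against "density" in 10 bins.
    profile = create_profile(dd, "density", "temperature", 10)
    print(profile["temperature"])
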

https://bitbucket.org/yt_analysis/yt/commits/1415ed1a898c/
Changeset:   1415ed1a898c
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 03:24:53+00:00
Summary:     Linting top-level submodules in the yt namespace

This also brings in several other files outside of the top-level namespace due
to transitive import issues. We now make an effort to import symbols from the
file where each symbol is defined rather than importing them transitively,
particularly via "import *".
Affected #:  17 files

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -16,7 +16,6 @@
 #-----------------------------------------------------------------------------
 
 import os
-import types
 from yt.extern.six.moves import configparser
 
 ytcfg_defaults = dict(

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -13,15 +13,18 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import os, os.path, types
+import os
 
 # Named imports
-from yt.funcs import *
 from yt.config import ytcfg
+from yt.funcs import mylog
 from yt.utilities.parameter_file_storage import \
     output_type_registry, \
     simulation_time_series_registry, \
     EnzoRunDatabase
+from yt.utilities.exceptions import \
+    YTOutputNotIdentified, \
+    YTSimulationNotIdentified
 from yt.utilities.hierarchy_inspection import find_lowest_subclasses
 
 def load(*args ,**kwargs):

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -24,8 +24,8 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_objects
 from yt.utilities.physical_constants import \
-    gravitational_constant_cgs, \
-    HUGE
+    gravitational_constant_cgs
+from yt.utilities.physical_ratios import HUGE
 from yt.extern.six import add_metaclass
 
 derived_quantity_registry = {}

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -15,9 +15,11 @@
 
 import os
 import numpy as np
+from collections import defaultdict
+
 from yt.utilities.io_handler import \
     BaseIOHandler
-from yt.funcs import mylog, defaultdict
+from yt.funcs import mylog
 from yt.frontends.chombo.io import parse_orion_sinks
 
 class IOHandlerBoxlib(BaseIOHandler):

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -40,7 +40,7 @@
     Dataset
 from yt.fields.field_info_container import \
     NullFunc
-from yt.utilities.physical_constants import \
+from yt.utilities.physical_ratios import \
     rho_crit_g_cm3_h2, cm_per_mpc
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.pyparselibconfig import libconfig

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -20,7 +20,8 @@
 from math import ceil
 
 from yt.convenience import \
-    load, \
+    load
+from yt.funcs import \
     only_on_root
 from yt.data_objects.time_series import \
     SimulationTimeSeries, DatasetSeries

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -27,7 +27,7 @@
     Dataset
 from yt.utilities.file_handler import \
     HDF5FileHandler
-from yt.utilities.physical_constants import cm_per_mpc
+from yt.utilities.physical_ratios import cm_per_mpc
 from .fields import FLASHFieldInfo
 
 class FLASHGrid(AMRGridPatch):

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/frontends/gadget/simulation_handling.py
--- a/yt/frontends/gadget/simulation_handling.py
+++ b/yt/frontends/gadget/simulation_handling.py
@@ -18,7 +18,8 @@
 import os
 
 from yt.convenience import \
-    load, \
+    load
+from yt.funcs import \
     only_on_root
 from yt.data_objects.time_series import \
     SimulationTimeSeries, DatasetSeries

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -15,11 +15,11 @@
 
 import os.path
 from yt.testing import \
+    requires_file, \
     assert_equal
 from yt.utilities.answer_testing.framework import \
     FieldValuesTest, \
     requires_ds, \
-    requires_file, \
     data_dir_load
 from yt.frontends.gadget_fof.api import GadgetFOFDataset
 

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -31,7 +31,8 @@
 from yt.utilities.cosmology import \
     Cosmology
 from yt.utilities.physical_constants import \
-    G, \
+    G
+from yt.utilities.physical_ratios import \
     cm_per_kpc
 
 from .fields import \

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -16,21 +16,32 @@
 
 import errno
 from yt.extern.six import string_types
-import time, types, signal, inspect, traceback, sys, pdb, os, re
+import time
+import inspect
+import traceback
+import sys
+import pdb
+import os
+import re
 import contextlib
-import warnings, struct, subprocess
+import warnings
+import struct
+import subprocess
 import numpy as np
+import itertools
+import base64
+import numpy
+import matplotlib
+import getpass
 from distutils.version import LooseVersion
 from math import floor, ceil
 from numbers import Number as numeric_type
 
 from yt.extern.six.moves import builtins, urllib
-from yt.utilities.exceptions import *
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.exceptions import YTInvalidWidthError
 import yt.extern.progressbar as pb
-import yt.utilities.rpdb as rpdb
 from yt.units.yt_array import YTArray, YTQuantity
-from collections import defaultdict
 from functools import wraps
 
 # Some functions for handling sequences and other types
@@ -171,7 +182,7 @@
         mylog.debug('%s took %0.3f s', func.__name__, (t2-t1))
         return res
     from yt.config import ytcfg
-    if ytcfg.getboolean("yt","timefunctions") == True:
+    if ytcfg.getboolean("yt","timefunctions") is True:
         return wrapper
     else:
         return func
@@ -293,7 +304,7 @@
     *num_up* refers to how many frames of the stack get stripped off, and
     defaults to 1 so that this function itself is stripped off.
     """
-
+    import IPython
     api_version = get_ipython_api_version()
 
     frame = inspect.stack()[num_up]
@@ -466,7 +477,6 @@
 
 _ss = "fURbBUUBE0cLXgETJnZgJRMXVhVGUQpQAUBuehQMUhJWRFFRAV1ERAtBXw1dAxMLXT4zXBFfABNN\nC0ZEXw1YUURHCxMXVlFERwxWCQw=\n"
 def _rdbeta(key):
-    import itertools, base64
     enc_s = base64.decodestring(_ss)
     dec_s = ''.join([ chr(ord(a) ^ ord(b)) for a, b in zip(enc_s, itertools.cycle(key)) ])
     print(dec_s)
@@ -541,12 +551,10 @@
     return version
 
 def get_version_stack():
-    import numpy, matplotlib, h5py
     version_info = {}
     version_info['yt'] = get_yt_version()
     version_info['numpy'] = numpy.version.version
     version_info['matplotlib'] = matplotlib.__version__
-    version_info['h5py'] = h5py.version.version
     return version_info
 
 def get_script_contents():
@@ -585,6 +593,7 @@
     return urllib.request.urlopen(req).read()
 
 def get_yt_supp():
+    import hglib
     supp_path = os.path.join(os.environ["YT_DEST"], "src",
                              "yt-supplemental")
     # Now we check that the supplemental repository is checked out.
@@ -605,8 +614,8 @@
             print("%s" % (supp_path))
             print()
             sys.exit(1)
-        rv = commands.clone(uu,
-                "http://bitbucket.org/yt_analysis/yt-supplemental/", supp_path)
+        rv = hglib.clone("http://bitbucket.org/yt_analysis/yt-supplemental/", 
+                         supp_path)
         if rv:
             print("Something has gone wrong.  Quitting.")
             sys.exit(1)

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/pmods.py
--- a/yt/pmods.py
+++ b/yt/pmods.py
@@ -193,9 +193,13 @@
 """
 from __future__ import print_function
 
-import sys, imp, types
+import sys
+import imp
+import types
 from yt.extern.six.moves import builtins
 from mpi4py import MPI
+
+
 class mpi(object):
     rank = MPI.COMM_WORLD.Get_rank()
     @staticmethod
@@ -219,7 +223,7 @@
 
     def callAfterImport(self,f):
         "Add f to the list of functions to call on exit"
-        if type(f) != types.FunctionType:
+        if not isinstance(f, types.FunctionType):
             raise TypeError("Argument must be a function!")
         self.__funcs.append(f)
 
@@ -289,7 +293,7 @@
 # The remaining functions are taken unmodified (except for the names)
 # from knee.py.
 def __determine_parent__(globals, level):
-    if not globals or  "__name__" not in globals:
+    if not globals or "__name__" not in globals:
         return None
     pname = globals['__name__']
     if "__path__" in globals:
@@ -363,5 +367,5 @@
 # Now we import all the yt.mods items.
 with mpi_import():
     if MPI.COMM_WORLD.rank == 0: print("Beginning parallel import block.")
-    from yt.mods import *
+    from yt.mods import *  # NOQA
     if MPI.COMM_WORLD.rank == 0: print("Ending parallel import block.")

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/setup.py
--- a/yt/setup.py
+++ b/yt/setup.py
@@ -1,7 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
 
 
 def configuration(parent_package='', top_path=None):
@@ -15,6 +12,7 @@
     config.add_subpackage('geometry')
     config.add_subpackage('units')
     config.add_subpackage('utilities')
+    config.add_subpackage('tests')
     config.add_subpackage('visualization')
     config.make_config_py()
     #config.make_svn_version_py()

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -16,17 +16,27 @@
 
 # This handles the command line.
 
-import argparse, os, sys
+import argparse
+import os
+import sys
+import signal
 
 from yt.config import ytcfg
-from yt.funcs import *
+from yt.funcs import \
+    mylog, \
+    signal_print_traceback, \
+    signal_ipython, \
+    paste_traceback, \
+    paste_traceback_detailed
+from yt.utilities import rpdb
 
 exe_name = os.path.basename(sys.executable)
 # At import time, we determined whether or not we're being run in parallel.
 def turn_on_parallelism():
     parallel_capable = False
     try:
-        from mpi4py import MPI
+        # we import this to check if mpi4py is installed
+        from mpi4py import MPI  # NOQA
     except ImportError as e:
         mylog.error("Warning: Attempting to turn on parallelism, " +
                     "but mpi4py import failed. Try pip install mpi4py.")
@@ -65,7 +75,8 @@
             sys.excepthook = paste_traceback_detailed
             mylog.debug("Enabling detailed traceback pasting")
         elif self.dest == "detailed":
-            import cgitb; cgitb.enable(format="text")
+            import cgitb
+            cgitb.enable(format="text")
             mylog.debug("Enabling detailed traceback reporting")
         elif self.dest == "rpdb":
             sys.excepthook = rpdb.rpdb_excepthook
@@ -128,7 +139,7 @@
     help_parser.set_defaults(func=print_help)
 
 
-if parallel_capable == True:
+if parallel_capable is True:
     pass
 elif exe_name in \
         ["mpi4py", "embed_enzo",

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -19,16 +19,21 @@
 import numpy as np
 import importlib
 import os
-from yt.funcs import *
+from yt.funcs import iterable
 from yt.config import ytcfg
-from numpy.testing import assert_array_equal, assert_almost_equal, \
-    assert_approx_equal, assert_array_almost_equal, assert_equal, \
-    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp,\
-    assert_allclose, assert_raises
-from yt.units.yt_array import uconcatenate
-import yt.fields.api as field_api
+# we import this in a weird way from numpy.testing to avoid triggering
+# flake8 errors from the unused imports. These test functions are imported
+# elsewhere in yt from here so we want them to be imported here.
+from numpy.testing import assert_array_equal, assert_almost_equal  # NOQA
+from numpy.testing import assert_approx_equal, assert_array_almost_equal  # NOQA
+from numpy.testing import assert_equal, assert_array_less  # NOQA
+from numpy.testing import assert_string_equal  # NOQA
+from numpy.testing import assert_array_almost_equal_nulp  # NOQA
+from numpy.testing import assert_allclose, assert_raises  # NOQA
+from nose.tools import assert_true, assert_less_equal  # NOQA
 from yt.convenience import load
-
+from yt.units.yt_array import YTArray, YTQuantity
+from yt.utilities.exceptions import YTUnitOperationError
 
 def assert_rel_equal(a1, a2, decimals, err_msg='', verbose=True):
     # We have nan checks in here because occasionally we have fields that get
@@ -728,7 +733,10 @@
 
 def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False,
              call_pdb = False):
-    import nose, os, sys, yt
+    import nose
+    import os
+    import sys
+    import yt
     from yt.funcs import mylog
     orig_level = mylog.getEffectiveLevel()
     mylog.setLevel(50)

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -37,7 +37,7 @@
 from yt.units.unit_lookup_table import \
     default_unit_symbol_lut, unit_prefixes, prefixable_units
 # unit definitions
-from yt.utilities.physical_constants import \
+from yt.utilities.physical_ratios import \
     cm_per_pc, sec_per_year, cm_per_km, cm_per_mpc, \
     mass_sun_grams
 

diff -r 474d4b7d547e3fd4f858659a674d8bfd893482e3 -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -21,6 +21,7 @@
 import matplotlib
 import os
 
+from collections import defaultdict
 from functools import wraps
 from matplotlib.font_manager import FontProperties
 
@@ -28,7 +29,7 @@
 from .tick_locators import LogLocator, LinearLocator
 
 from yt.funcs import \
-    defaultdict, get_image_suffix, \
+    get_image_suffix, \
     get_ipython_api_version, iterable, \
     ensure_list
 from yt.utilities.exceptions import \

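Two idioms recur throughout this changeset: import names from the module that
defines them, and mark deliberate re-exports or availability checks with
"# NOQA" so flake8 does not flag them as unused. A condensed sketch, assembled
from the hunks above; the helper name is hypothetical:

    # Prefer explicit imports over "from yt.funcs import *"; standard library
    # names come from the standard library, not from other yt files.
    from collections import defaultdict  # NOQA
    from yt.funcs import mylog

    # Re-exports that exist only for downstream importers are silenced
    # per line:
    from numpy.testing import assert_equal  # NOQA

    def parallelism_available():
        # Condensed from turn_on_parallelism() above; the import exists
        # only to check that mpi4py is installed.
        try:
            from mpi4py import MPI  # NOQA
        except ImportError:
            mylog.error("mpi4py import failed. Try pip install mpi4py.")
            return False
        return True
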

https://bitbucket.org/yt_analysis/yt/commits/3231c7de2e2e/
Changeset:   3231c7de2e2e
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 03:26:23+00:00
Summary:     Small adjustments to flake8 rules, adding clarifying comments to explain why
we ignore certain files
Affected #:  1 file

diff -r 1415ed1a898c29cebb53686c1c72c2ee686decd2 -r 3231c7de2e2e7372b08be8fc4cd26aed728369a4 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -9,7 +9,11 @@
 with-xunit=1
 
 [flake8]
-# if we include api.py files, we get tons of spurious "imported but unused" errors
-exclude = */api.py,*/__config__.py,yt/visualization/_mpl_imports.py
+# we exclude:
+#      api.py and __init__.py files to avoid spurious unused import errors
+#      _mpl_imports.py for the same reason
+#      autogenerated __config__.py files
+#      vendored libraries
+exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
 max-line-length=999
-ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E302,E303,E401,E502,E701,E703,W291,W293,W391
\ No newline at end of file
+ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E302,E303,E502,E701,E703,W291,W293,W391
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/f5a1d02e9862/
Changeset:   f5a1d02e9862
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 03:26:58+00:00
Summary:     Adding a test for flake8 issues to the unit tests
Affected #:  2 files

diff -r 3231c7de2e2e7372b08be8fc4cd26aed728369a4 -r f5a1d02e98625290b47c95a2aef3f3fee031e569 yt/tests/__init__.py
--- /dev/null
+++ b/yt/tests/__init__.py
@@ -0,0 +1,1 @@
+"""Tests for generic yt functionality and codebase-wide tests"""

diff -r 3231c7de2e2e7372b08be8fc4cd26aed728369a4 -r f5a1d02e98625290b47c95a2aef3f3fee031e569 yt/tests/test_flake8.py
--- /dev/null
+++ b/yt/tests/test_flake8.py
@@ -0,0 +1,21 @@
+import subprocess
+import yt
+import os
+
+from yt.testing import requires_module
+
+
+@requires_module('flake8')
+def test_flake8():
+    yt_dir = os.path.dirname(os.path.abspath(yt.__file__))
+    initial_dir = os.getcwd()
+    os.chdir(yt_dir)
+    output_file = os.path.sep.join([os.path.dirname(initial_dir), 'flake8.out'])
+    output_string = "--output-file=%s" % output_file
+    subprocess.call(['flake8', output_string, os.curdir])
+    os.chdir(initial_dir)
+    with open(output_file) as f:
+        flake8_output = f.readlines()
+    if flake8_output != []:
+        raise AssertionError(
+            "flake8 found style errors:\n\n%s" % "\n".join(flake8_output))

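To run just this check locally once flake8 is installed, something like the
following should work; the module path mirrors the new file above, though the
exact nose invocation is an assumption of this sketch:

    import nose

    # Equivalent to running "nosetests yt.tests.test_flake8" from the
    # repository root.
    nose.run(argv=["nosetests", "yt.tests.test_flake8"])
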

https://bitbucket.org/yt_analysis/yt/commits/51e4743d3482/
Changeset:   51e4743d3482
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 04:10:10+00:00
Summary:     Updating the style guide. Deleting the copy of the style guide in the docs and including the guide directly.
Affected #:  2 files

diff -r f5a1d02e98625290b47c95a2aef3f3fee031e569 -r 51e4743d3482d6a9d877d2ebd028595485269a7e doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -6,75 +6,96 @@
 
  * In general, follow PEP-8 guidelines.
    http://www.python.org/dev/peps/pep-0008/
- * Classes are ConjoinedCapitals, methods and functions are
-   lowercase_with_underscores.
+ * Classes are ``ConjoinedCapitals``, methods and functions are
+   ``lowercase_with_underscores``.
  * Use 4 spaces, not tabs, to represent indentation.
  * Line widths should not be more than 80 characters.
  * Do not use nested classes unless you have a very good reason to, such as
    requiring a namespace or class-definition modification.  Classes should live
-   at the top level.  __metaclass__ is exempt from this.
- * Do not use unnecessary parenthesis in conditionals.  if((something) and
-   (something_else)) should be rewritten as if something and something_else.
-   Python is more forgiving than C.
+   at the top level.  ``__metaclass__`` is exempt from this.
+ * Do not use unnecessary parentheses in conditionals.  ``if((something) and
+   (something_else))`` should be rewritten as
+   ``if something and something_else``. Python is more forgiving than C.
  * Avoid copying memory when possible. For example, don't do 
-   "a = a.reshape(3,4)" when "a.shape = (3,4)" will do, and "a = a * 3" should
-   be "np.multiply(a, 3, a)".
- * In general, avoid all double-underscore method names: __something is usually
-   unnecessary.
+   ``a = a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3``
+   should be ``np.multiply(a, 3, a)``.
+ * In general, avoid all double-underscore method names: ``__something`` is 
+   usually unnecessary.
  * When writing a subclass, use the super built-in to access the super class,
-   rather than explicitly. Ex: "super(SpecialGrid, self).__init__()" rather than
-   "SpecialGrid.__init__()".
- * Doc strings should describe input, output, behavior, and any state changes
-   that occur on an object.  See the file `doc/docstring_example.txt` for a
+   rather than explicitly. Ex: ``super(SpecialGridSubclass, self).__init__()``
+   rather than ``SpecialGrid.__init__()``.
+ * Docstrings should describe input, output, behavior, and any state changes
+   that occur on an object.  See the file ``doc/docstring_example.txt`` for a
    fiducial example of a docstring.
+ * Use only one top-level import per line. Unless there is a good reason not to,
+   imports should happen at the top of the file, after the copyright blurb.
+ * Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use 
+   ``is`` or ``is not``. 
+ * If you are comparing with a numpy boolean array, just refer to the array. 
+   Ex: do ``np.all(array)`` instead of ``np.all(array == True)``.
+ * Never compare with None using ``==`` or ``!=``, use ``is None`` or
+   ``is not None``.
+ * Use ``statement is not True`` instead of ``not statement is True``.
+ * Only one statement per line, do not use semicolons to put two or more 
+   statements on a single line.
+ * Only declare local variables if they will be used later. If you do not use the
+   return value of a function, do not store it in a variable.
+ * Add tests for new functionality. When fixing a bug, consider adding a test to
+   prevent the bug from recurring.
 
 API Guide
 ---------
 
- * Do not import "*" from anything other than "yt.funcs".
+ * Do not use ``from some_module import *``
  * Internally, only import from source files directly -- instead of:
 
-   from yt.visualization.api import ProjectionPlot
+     ``from yt.visualization.api import ProjectionPlot``
 
    do:
 
-   from yt.visualization.plot_window import ProjectionPlot
+     ``from yt.visualization.plot_window import ProjectionPlot``
 
- * Numpy is to be imported as "np", after a long time of using "na".
+ * Import symbols from the module where they are defined, avoid transitive 
+   imports.
+ * Import standard library modules, functions, and classes from builtins, do not
+   import them from other yt files.
+ * Numpy is to be imported as ``np``.
  * Do not use too many keyword arguments.  If you have a lot of keyword
-   arguments, then you are doing too much in __init__ and not enough via
+   arguments, then you are doing too much in ``__init__`` and not enough via
    parameter setting.
- * In function arguments, place spaces before commas.  def something(a,b,c)
-   should be def something(a, b, c).
+ * In function arguments, place spaces after commas.  ``def something(a,b,c)``
+   should be ``def something(a, b, c)``.
  * Don't create a new class to replicate the functionality of an old class --
    replace the old class.  Too many options makes for a confusing user
    experience.
  * Parameter files external to yt are a last resort.
- * The usage of the **kwargs construction should be avoided.  If they cannot
+ * The usage of the ``**kwargs`` construction should be avoided.  If they cannot
    be avoided, they must be explained, even if they are only to be passed on to
    a nested function.
 
 Variable Names and Enzo-isms
 ----------------------------
+Avoid Enzo-isms.  This includes but is not limited to:
 
- * Avoid Enzo-isms.  This includes but is not limited to:
-   * Hard-coding parameter names that are the same as those in Enzo.  The
-     following translation table should be of some help.  Note that the
-     parameters are now properties on a Dataset subclass: you access them
-     like ds.refine_by .
-     * RefineBy => refine_by
-     * TopGridRank => dimensionality
-     * TopGridDimensions => domain_dimensions
-     * InitialTime => current_time
-     * DomainLeftEdge => domain_left_edge
-     * DomainRightEdge => domain_right_edge
-     * CurrentTimeIdentifier => unique_identifier
-     * CosmologyCurrentRedshift => current_redshift
-     * ComovingCoordinates => cosmological_simulation
-     * CosmologyOmegaMatterNow => omega_matter
-     * CosmologyOmegaLambdaNow => omega_lambda
-     * CosmologyHubbleConstantNow => hubble_constant
-   * Do not assume that the domain runs from 0 .. 1.  This is not true
-     everywhere.
+ * Hard-coding parameter names that are the same as those in Enzo.  The
+   following translation table should be of some help.  Note that the
+   parameters are now properties on a ``Dataset`` subclass: you access them
+   like ``ds.refine_by``.
+
+    - ``RefineBy`` => ``refine_by``
+    - ``TopGridRank`` => ``dimensionality``
+    - ``TopGridDimensions`` => ``domain_dimensions``
+    - ``InitialTime`` => ``current_time``
+    - ``DomainLeftEdge`` => ``domain_left_edge``
+    - ``DomainRightEdge`` => ``domain_right_edge``
+    - ``CurrentTimeIdentifier`` => ``unique_identifier``
+    - ``CosmologyCurrentRedshift`` => ``current_redshift``
+    - ``ComovingCoordinates`` => ``cosmological_simulation``
+    - ``CosmologyOmegaMatterNow`` => ``omega_matter``
+    - ``CosmologyOmegaLambdaNow`` => ``omega_lambda``
+    - ``CosmologyHubbleConstantNow`` => ``hubble_constant``
+
+ * Do not assume that the domain runs from 0 .. 1.  This is not true
+   everywhere.
  * Variable names should be short but descriptive.
  * No globals!

diff -r f5a1d02e98625290b47c95a2aef3f3fee031e569 -r 51e4743d3482d6a9d877d2ebd028595485269a7e doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -494,80 +494,4 @@
 
 .. _code-style-guide:
 
-Code Style Guide
-----------------
-
-To keep things tidy, we try to stick with a couple simple guidelines.
-
-General Guidelines
-++++++++++++++++++
-
-* In general, follow `PEP-8 <http://www.python.org/dev/peps/pep-0008/>`_ guidelines.
-* Classes are ConjoinedCapitals, methods and functions are
-  ``lowercase_with_underscores.``
-* Use 4 spaces, not tabs, to represent indentation.
-* Line widths should not be more than 80 characters.
-* Do not use nested classes unless you have a very good reason to, such as
-  requiring a namespace or class-definition modification.  Classes should live
-  at the top level.  ``__metaclass__`` is exempt from this.
-* Do not use unnecessary parentheses in conditionals.  ``if((something) and
-  (something_else))`` should be rewritten as ``if something and
-  something_else``.  Python is more forgiving than C.
-* Avoid copying memory when possible. For example, don't do ``a =
-  a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3`` should be
-  ``np.multiply(a, 3, a)``.
-* In general, avoid all double-underscore method names: ``__something`` is
-  usually unnecessary.
-* Doc strings should describe input, output, behavior, and any state changes
-  that occur on an object.  See the file `doc/docstring_example.txt` for a
-  fiducial example of a docstring.
-
-API Guide
-+++++++++
-
-* Do not import "*" from anything other than ``yt.funcs``.
-* Internally, only import from source files directly; instead of: ``from
-  yt.visualization.api import SlicePlot`` do
-  ``from yt.visualization.plot_window import SlicePlot``.
-* Numpy is to be imported as ``np``.
-* Do not use too many keyword arguments.  If you have a lot of keyword
-  arguments, then you are doing too much in ``__init__`` and not enough via
-  parameter setting.
-* In function arguments, place spaces before commas.  ``def something(a,b,c)``
-  should be ``def something(a, b, c)``.
-* Don't create a new class to replicate the functionality of an old class --
-  replace the old class.  Too many options makes for a confusing user
-  experience.
-* Parameter files external to yt are a last resort.
-* The usage of the ``**kwargs`` construction should be avoided.  If they
-  cannot be avoided, they must be explained, even if they are only to be
-  passed on to a nested function.
-* Constructor APIs should be kept as *simple* as possible.
-* Variable names should be short but descriptive.
-* No global variables!
-
-Variable Names and Enzo-isms
-++++++++++++++++++++++++++++
-
-* Avoid Enzo-isms.  This includes but is not limited to:
-
-  + Hard-coding parameter names that are the same as those in Enzo.  The
-    following translation table should be of some help.  Note that the
-    parameters are now properties on a Dataset subclass: you access them
-    like ``ds.refine_by`` .
-
-    - ``RefineBy `` => `` refine_by``
-    - ``TopGridRank `` => `` dimensionality``
-    - ``TopGridDimensions `` => `` domain_dimensions``
-    - ``InitialTime `` => `` current_time``
-    - ``DomainLeftEdge `` => `` domain_left_edge``
-    - ``DomainRightEdge `` => `` domain_right_edge``
-    - ``CurrentTimeIdentifier `` => `` unique_identifier``
-    - ``CosmologyCurrentRedshift `` => `` current_redshift``
-    - ``ComovingCoordinates `` => `` cosmological_simulation``
-    - ``CosmologyOmegaMatterNow `` => `` omega_matter``
-    - ``CosmologyOmegaLambdaNow `` => `` omega_lambda``
-    - ``CosmologyHubbleConstantNow `` => `` hubble_constant``
-
-  + Do not assume that the domain runs from 0 to 1.  This is not true
-    for many codes and datasets.
+.. include:: ../../coding_styleguide.txt

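A few of the new rules condensed into code form; these snippets are purely
illustrative and every name in them is invented for the sketch:

    import numpy as np

    a = np.ones(12)
    a.shape = (3, 4)        # reshape in place rather than a = a.reshape(3, 4)
    np.multiply(a, 3, a)    # in-place multiply rather than a = a * 3

    mask = a > 2.0
    if np.all(mask):        # not np.all(mask == True)
        pass

    result = None
    if result is None:      # identity checks for None/True/False, never ==
        pass
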

https://bitbucket.org/yt_analysis/yt/commits/ba37f4a5a3fb/
Changeset:   ba37f4a5a3fb
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 04:26:17+00:00
Summary:     Add missing defaultdict import
Affected #:  1 file

diff -r 51e4743d3482d6a9d877d2ebd028595485269a7e -r ba37f4a5a3fb2223edb15c3a8aa35ea7005b9e60 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -27,6 +27,8 @@
 import tempfile
 import glob
 
+from collections import defaultdict
+
 from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
 from yt.funcs import \


https://bitbucket.org/yt_analysis/yt/commits/e6b404f25d5c/
Changeset:   e6b404f25d5c
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 04:40:46+00:00
Summary:     Adding a few more ignored rules
Affected #:  1 file

diff -r ba37f4a5a3fb2223edb15c3a8aa35ea7005b9e60 -r e6b404f25d5cd6e3dd683c38560b1a875b974ee9 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -16,4 +16,4 @@
 #      vendored libraries
 exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
 max-line-length=999
-ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E302,E303,E502,E701,E703,W291,W293,W391
\ No newline at end of file
+ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E402,E502,E701,E703,E731,W291,W293,W391,W503
\ No newline at end of file

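For reference, the codes added to the ignore list here correspond to checks
like the following; the snippet below is an invented illustration of what each
would otherwise flag:

    square = lambda x: x * x   # E731: assigning a lambda instead of a def
    #comment without a space   # E265 (E266 covers '## block' comments)
    import os                  # E402: module-level import not at top of file

    total = (square(2)
             + square(3))      # W503: line break before the binary operator
    print(os.curdir, total)
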

https://bitbucket.org/yt_analysis/yt/commits/cad0267558f8/
Changeset:   cad0267558f8
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 04:40:56+00:00
Summary:     Fixing a number of issues caught by the test bot.
Affected #:  7 files

diff -r e6b404f25d5cd6e3dd683c38560b1a875b974ee9 -r cad0267558f8d48c931893451ab4d040d050da82 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -281,8 +281,6 @@
         errSq=sum(dif**2)
 
         if any(linesP[:,1]==speciesDict['init_b']):
-         #   linesP = prevLinesP
-
             flag = True
             break
             

diff -r e6b404f25d5cd6e3dd683c38560b1a875b974ee9 -r cad0267558f8d48c931893451ab4d040d050da82 yt/analysis_modules/photon_simulator/tests/test_cluster.py
--- a/yt/analysis_modules/photon_simulator/tests/test_cluster.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_cluster.py
@@ -58,8 +58,10 @@
                                      responses=[ARF,RMF],
                                      absorb_model=tbabs_model)
 
-    def photons_test(): return photons.photons
-    def events_test(): return events.events
+    def photons_test():
+        return photons.photons
+    def events_test():
+        return events.events
 
     for test in [GenericArrayTest(ds, photons_test),
                  GenericArrayTest(ds, events_test)]:

diff -r e6b404f25d5cd6e3dd683c38560b1a875b974ee9 -r cad0267558f8d48c931893451ab4d040d050da82 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -197,7 +197,7 @@
         elif isinstance(center, string_types):
             if center.lower() in ("c", "center"):
                 self.center = self.ds.domain_center
-             # is this dangerous for race conditions?
+            # is this dangerous for race conditions?
             elif center.lower() in ("max", "m"):
                 self.center = self.ds.find_max(("gas", "density"))[1]
             elif center.startswith("max_"):

diff -r e6b404f25d5cd6e3dd683c38560b1a875b974ee9 -r cad0267558f8d48c931893451ab4d040d050da82 yt/data_objects/tests/test_connected_sets.py
--- a/yt/data_objects/tests/test_connected_sets.py
+++ b/yt/data_objects/tests/test_connected_sets.py
@@ -1,7 +1,8 @@
 from yt.utilities.answer_testing.level_sets_tests import \
-     ExtractConnectedSetsTest, \
-     requires_ds, \
-     data_dir_load
+    ExtractConnectedSetsTest
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    data_dir_load
 
 g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
 @requires_ds(g30, big_data=True)

diff -r e6b404f25d5cd6e3dd683c38560b1a875b974ee9 -r cad0267558f8d48c931893451ab4d040d050da82 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -66,6 +66,7 @@
 
 def realistic_ds(fields, particle_fields, nprocs):
     np.random.seed(int(0x4d3d3d3))
+    global base_ds
     units = [base_ds._get_field_info(*f).units for f in fields]
     punits = [base_ds._get_field_info('io', f).units for f in particle_fields]
     fields = [_strip_ftype(f) for f in fields]
@@ -110,7 +111,7 @@
         self.nproc = nproc
 
     def __call__(self):
-
+        global base_ds
         field = base_ds._get_field_info(*self.field_name)
         deps = field.get_dependencies(ds = base_ds)
         requested = deps.requested
@@ -173,6 +174,7 @@
                 assert_array_almost_equal_nulp(v1, res, 4)
 
 def test_all_fields():
+    global base_ds
     for field in sorted(base_ds.field_info):
         if field[1].find("beta_p") > -1:
             continue
@@ -186,6 +188,7 @@
             yield TestFieldAccess(field, nproc)
 
 def test_add_deposited_particle_field():
+    global base_ds
     fn = base_ds.add_deposited_particle_field(('io', 'particle_ones'), 'count')
     assert_equal(fn, ('deposit', 'io_count_ones'))
     ad = base_ds.all_data()
@@ -202,6 +205,7 @@
     assert_almost_equal(ret.sum(), 3824750.912653606)
 
 def test_add_gradient_fields():
+    global base_ds
     gfields = base_ds.add_gradient_fields(("gas","density"))
     gfields += base_ds.add_gradient_fields(("index", "ones"))
     field_list = [('gas', 'density_gradient_x'),

diff -r e6b404f25d5cd6e3dd683c38560b1a875b974ee9 -r cad0267558f8d48c931893451ab4d040d050da82 yt/utilities/grid_data_format/conversion/conversion_athena.py
--- a/yt/utilities/grid_data_format/conversion/conversion_athena.py
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -265,7 +265,7 @@
                     self.write_gdf_field(gdf_name, i, field+'_z', data_z)
                     del data, data_x, data_y, data_z
                 del line
-                line = f.readline()
+                line = f.readline()  # NOQA
             f.close()
             del f
 

diff -r e6b404f25d5cd6e3dd683c38560b1a875b974ee9 -r cad0267558f8d48c931893451ab4d040d050da82 yt/utilities/tests/test_flagging_methods.py
--- a/yt/utilities/tests/test_flagging_methods.py
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -9,6 +9,7 @@
     ds.index
 
 def test_over_density():
+    global ds
     od_flag = flagging_method_registry["overdensity"](0.75) 
     criterion = (ds.index.grids[0]["density"] > 0.75)
     assert( np.all( od_flag(ds.index.grids[0]) == criterion) )


https://bitbucket.org/yt_analysis/yt/commits/bd73e1357e84/
Changeset:   bd73e1357e84
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 04:44:05+00:00
Summary:     Fixing an import issue in the halo machinery
Affected #:  1 file

diff -r cad0267558f8d48c931893451ab4d040d050da82 -r bd73e1357e846839831c4a79438c4b9ed9021e9d yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -30,10 +30,10 @@
     get_rotation_matrix, \
     periodic_dist
 from yt.utilities.physical_constants import \
-    mass_sun_cgs, \
+    mass_sun_cgs
+from yt.utilities.physical_ratios import \
+    rho_crit_g_cm3_h2, \
     TINY
-from yt.utilities.physical_ratios import \
-    rho_crit_g_cm3_h2
 
 from .hop.EnzoHop import RunHOP
 from .fof.EnzoFOF import RunFOF


https://bitbucket.org/yt_analysis/yt/commits/f0648097f5ee/
Changeset:   f0648097f5ee
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 16:42:38+00:00
Summary:     Fixing remaining flake8 errors on the build machine
Affected #:  3 files

diff -r bd73e1357e846839831c4a79438c4b9ed9021e9d -r f0648097f5ee533f231299a4cad6d2d4ca82bbad yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -20,6 +20,9 @@
     YTFieldUnitError, \
     YTFieldUnitParseError
 
+base_ds = None
+
+
 def setup():
     global base_ds
     # Make this super teeny tiny

diff -r bd73e1357e846839831c4a79438c4b9ed9021e9d -r f0648097f5ee533f231299a4cad6d2d4ca82bbad yt/utilities/spatial/kdtree.py
--- a/yt/utilities/spatial/kdtree.py
+++ b/yt/utilities/spatial/kdtree.py
@@ -175,11 +175,16 @@
 
     class node(object):
         if sys.version_info[0] >= 3:
-            def __lt__(self, other): id(self) < id(other)
-            def __gt__(self, other): id(self) > id(other)
-            def __le__(self, other): id(self) <= id(other)
-            def __ge__(self, other): id(self) >= id(other)
-            def __eq__(self, other): id(self) == id(other)
+            def __lt__(self, other):
+                return id(self) < id(other)
+            def __gt__(self, other):
+                return id(self) > id(other)
+            def __le__(self, other):
+                return id(self) <= id(other)
+            def __ge__(self, other):
+                return id(self) >= id(other)
+            def __eq__(self, other):
+                return id(self) == id(other)
 
     class leafnode(node):
         def __init__(self, idx):

diff -r bd73e1357e846839831c4a79438c4b9ed9021e9d -r f0648097f5ee533f231299a4cad6d2d4ca82bbad yt/utilities/tests/test_flagging_methods.py
--- a/yt/utilities/tests/test_flagging_methods.py
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -3,13 +3,10 @@
 from yt.testing import fake_random_ds
 from yt.utilities.flagging_methods import flagging_method_registry
 
-def setup():
-    global ds
+
+def test_over_density():
     ds = fake_random_ds(64)
     ds.index
-
-def test_over_density():
-    global ds
     od_flag = flagging_method_registry["overdensity"](0.75) 
     criterion = (ds.index.grids[0]["density"] > 0.75)
     assert( np.all( od_flag(ds.index.grids[0]) == criterion) )

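The base_ds sentinel added above settles a subtle lint problem: flake8 needs
to see the name defined at module scope before test functions declare it
global. A minimal sketch of the pattern, with a stand-in object instead of the
real dataset:

    base_ds = None  # defined at import time so the name exists at module scope

    def setup():
        # nose runs setup() before the tests; rebind the module-level name.
        global base_ds
        base_ds = object()  # stand-in for the real fake_random_ds(...) call

    def test_uses_base_ds():
        assert base_ds is not None
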

https://bitbucket.org/yt_analysis/yt/commits/0a65d74040da/
Changeset:   0a65d74040da
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 16:45:15+00:00
Summary:     Removing some no-op code in YTQuadTreeProj.get_data()
Affected #:  1 file

diff -r f0648097f5ee533f231299a4cad6d2d4ca82bbad -r 0a65d74040da3a01991f532b01629c690a0ec5e1 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -369,14 +369,13 @@
         data['pdy'] = self.ds.arr(pdy, code_length)
         data['fields'] = nvals
         # Now we run the finalizer, which is ignored if we don't need it
-        data['fields']
         field_data = np.hsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
-            self.ds._get_field_info(*field)
             mylog.debug("Setting field %s", field)
             input_units = self._projected_units[field]
             self[field] = self.ds.arr(field_data[fi].ravel(), input_units)
-        for i in list(data.keys()): self[i] = data.pop(i)
+        for i in list(data.keys()):
+            self[i] = data.pop(i)
         mylog.info("Projection completed")
         self.tree = tree
 

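For context on why the deleted lines were no-ops: a bare subscript or
attribute lookup whose result is never bound does nothing useful here. An
illustrative sketch, with a plain dict standing in for the projection's data
container:

    data = {"fields": [1.0, 2.0, 3.0]}

    data["fields"]                   # evaluated, result discarded: a no-op
    field_data = data.pop("fields")  # the useful form binds the result
    print(field_data)
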

https://bitbucket.org/yt_analysis/yt/commits/2e1165bb32c4/
Changeset:   2e1165bb32c4
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 19:21:13+00:00
Summary:     Moving the style guide to the root of the repository.
Affected #:  3 files

diff -r 0a65d74040da3a01991f532b01629c690a0ec5e1 -r 2e1165bb32c4cce84f8977752fe54fc9a34627b1 coding_styleguide.txt
--- /dev/null
+++ b/coding_styleguide.txt
@@ -0,0 +1,101 @@
+Style Guide for Coding in yt
+============================
+
+Coding Style Guide
+------------------
+
+ * In general, follow PEP-8 guidelines.
+   http://www.python.org/dev/peps/pep-0008/
+ * Classes are ``ConjoinedCapitals``, methods and functions are
+   ``lowercase_with_underscores``.
+ * Use 4 spaces, not tabs, to represent indentation.
+ * Line widths should not be more than 80 characters.
+ * Do not use nested classes unless you have a very good reason to, such as
+   requiring a namespace or class-definition modification.  Classes should live
+   at the top level.  ``__metaclass__`` is exempt from this.
+ * Do not use unnecessary parentheses in conditionals.  ``if((something) and
+   (something_else))`` should be rewritten as
+   ``if something and something_else``. Python is more forgiving than C.
+ * Avoid copying memory when possible. For example, don't do
+   ``a = a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3``
+   should be ``np.multiply(a, 3, a)``.
+ * In general, avoid all double-underscore method names: ``__something`` is
+   usually unnecessary.
+ * When writing a subclass, use the super built-in to access the superclass,
+   rather than by name. Ex: ``super(SpecialGridSubclass, self).__init__()``
+   rather than ``SpecialGrid.__init__()``.
+ * Docstrings should describe input, output, behavior, and any state changes
+   that occur on an object.  See the file ``doc/docstring_example.txt`` for a
+   fiducial example of a docstring.
+ * Use only one top-level import per line. Unless there is a good reason not to,
+   imports should happen at the top of the file, after the copyright blurb.
+ * Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use
+   ``is`` or ``is not``.
+ * If you are comparing with a numpy boolean array, just refer to the array.
+   Ex: do ``np.all(array)`` instead of ``np.all(array == True)``.
+ * Never compare with None using ``==`` or ``!=``, use ``is None`` or
+   ``is not None``.
+ * Use ``statement is not True`` instead of ``not statement is True``.
+ * Only one statement per line, do not use semicolons to put two or more
+   statements on a single line.
+ * Only declare local variables if they will be used later. If you do not use the
+   return value of a function, do not store it in a variable.
+ * Add tests for new functionality. When fixing a bug, consider adding a test to
+   prevent the bug from recurring.
+
+API Guide
+---------
+
+ * Do not use ``from some_module import *``
+ * Internally, only import from source files directly -- instead of:
+
+     ``from yt.visualization.api import ProjectionPlot``
+
+   do:
+
+     ``from yt.visualization.plot_window import ProjectionPlot``
+
+ * Import symbols from the module where they are defined, avoid transitive
+   imports.
+ * Import standard library modules, functions, and classes from builtins, do not
+   import them from other yt files.
+ * Numpy is to be imported as ``np``.
+ * Do not use too many keyword arguments.  If you have a lot of keyword
+   arguments, then you are doing too much in ``__init__`` and not enough via
+   parameter setting.
+ * In function arguments, place spaces after commas.  ``def something(a,b,c)``
+   should be ``def something(a, b, c)``.
+ * Don't create a new class to replicate the functionality of an old class --
+   replace the old class.  Too many options make for a confusing user
+   experience.
+ * Parameter files external to yt are a last resort.
+ * The usage of the ``**kwargs`` construction should be avoided.  If they cannot
+   be avoided, they must be explained, even if they are only to be passed on to
+   a nested function.
+
+Variable Names and Enzo-isms
+----------------------------
+Avoid Enzo-isms.  This includes but is not limited to:
+
+ * Hard-coding parameter names that are the same as those in Enzo.  The
+   following translation table should be of some help.  Note that the
+   parameters are now properties on a ``Dataset`` subclass: you access them
+   like ``ds.refine_by``.
+
+    - ``RefineBy`` => ``refine_by``
+    - ``TopGridRank`` => ``dimensionality``
+    - ``TopGridDimensions`` => ``domain_dimensions``
+    - ``InitialTime`` => ``current_time``
+    - ``DomainLeftEdge`` => ``domain_left_edge``
+    - ``DomainRightEdge`` => ``domain_right_edge``
+    - ``CurrentTimeIdentifier`` => ``unique_identifier``
+    - ``CosmologyCurrentRedshift`` => ``current_redshift``
+    - ``ComovingCoordinates`` => ``cosmological_simulation``
+    - ``CosmologyOmegaMatterNow`` => ``omega_matter``
+    - ``CosmologyOmegaLambdaNow`` => ``omega_lambda``
+    - ``CosmologyHubbleConstantNow`` => ``hubble_constant``
+
+ * Do not assume that the domain runs from 0 .. 1.  This is not true
+   everywhere.
+ * Variable names should be short but descriptive.
+ * No globals!

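Two of the less obvious rules above concern avoiding needless array copies;
a short sketch of what they mean in practice (illustrative only):

    import numpy as np

    a = np.arange(12.0)

    # Reshape in place instead of rebinding the name to a new array object:
    a.shape = (3, 4)        # preferred over a = a.reshape(3, 4)

    # Scale in place by supplying the output array to the ufunc:
    np.multiply(a, 3, a)    # preferred over a = a * 3, which allocates

    assert a[0, 1] == 3.0
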
diff -r 0a65d74040da3a01991f532b01629c690a0ec5e1 -r 2e1165bb32c4cce84f8977752fe54fc9a34627b1 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ /dev/null
@@ -1,101 +0,0 @@
-Style Guide for Coding in yt
-============================
-
-Coding Style Guide
-------------------
-
- * In general, follow PEP-8 guidelines.
-   http://www.python.org/dev/peps/pep-0008/
- * Classes are ``ConjoinedCapitals``, methods and functions are
-   ``lowercase_with_underscores``.
- * Use 4 spaces, not tabs, to represent indentation.
- * Line widths should not be more than 80 characters.
- * Do not use nested classes unless you have a very good reason to, such as
-   requiring a namespace or class-definition modification.  Classes should live
-   at the top level.  ``__metaclass__`` is exempt from this.
- * Do not use unnecessary parenthesis in conditionals.  ``if((something) and
-   (something_else))`` should be rewritten as 
-   ``if something and something_else``. Python is more forgiving than C.
- * Avoid copying memory when possible. For example, don't do 
-   ``a = a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3``
-   should be ``np.multiply(a, 3, a)``.
- * In general, avoid all double-underscore method names: ``__something`` is 
-   usually unnecessary.
- * When writing a subclass, use the super built-in to access the super class,
-   rather than explicitly. Ex: ``super(SpecialGridSubclass, self).__init__()``
-   rather than ``SpecialGrid.__init__()``.
- * Docstrings should describe input, output, behavior, and any state changes
-   that occur on an object.  See the file ``doc/docstring_example.txt`` for a
-   fiducial example of a docstring.
- * Use only one top-level import per line. Unless there is a good reason not to,
-   imports should happen at the top of the file, after the copyright blurb.
- * Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use 
-   ``is`` or ``is not``. 
- * If you are comparing with a numpy boolean array, just refer to the array. 
-   Ex: do ``np.all(array)`` instead of ``np.all(array == True)``.
- * Never comapre with None using ``==`` or ``!=``, use ``is None`` or 
-   ``is not None``.
- * Use ``statement is not True`` instead of ``not statement is True``
- * Only one statement per line, do not use semicolons to put two or more 
-   statements on a single line.
- * Only declare local variables if they will be used later. If you do not use the
-   return value of a function, do not store it in a variable.
- * Add tests for new functionality. When fixing a bug, consider adding a test to
-   prevent the bug from recurring.
-
-API Guide
----------
-
- * Do not use ``from some_module import *``
- * Internally, only import from source files directly -- instead of:
-
-     ``from yt.visualization.api import ProjectionPlot``
-
-   do:
-
-     ``from yt.visualization.plot_window import ProjectionPlot``
-
- * Import symbols from the module where they are defined, avoid transitive 
-   imports.
- * Import standard library modules, functions, and classes from builtins, do not
-   import them from other yt files.
- * Numpy is to be imported as ``np``.
- * Do not use too many keyword arguments.  If you have a lot of keyword
-   arguments, then you are doing too much in ``__init__`` and not enough via
-   parameter setting.
- * In function arguments, place spaces before commas.  ``def something(a,b,c)``
-   should be ``def something(a, b, c)``.
- * Don't create a new class to replicate the functionality of an old class --
-   replace the old class.  Too many options makes for a confusing user
-   experience.
- * Parameter files external to yt are a last resort.
- * The usage of the ``**kwargs`` construction should be avoided.  If they cannot
-   be avoided, they must be explained, even if they are only to be passed on to
-   a nested function.
-
-Variable Names and Enzo-isms
-----------------------------
-Avoid Enzo-isms.  This includes but is not limited to:
-
- * Hard-coding parameter names that are the same as those in Enzo.  The
-   following translation table should be of some help.  Note that the
-   parameters are now properties on a ``Dataset`` subclass: you access them
-   like ds.refine_by .
-
-    - ``RefineBy `` => `` refine_by``
-    - ``TopGridRank `` => `` dimensionality``
-    - ``TopGridDimensions `` => `` domain_dimensions``
-    - ``InitialTime `` => `` current_time``
-    - ``DomainLeftEdge `` => `` domain_left_edge``
-    - ``DomainRightEdge `` => `` domain_right_edge``
-    - ``CurrentTimeIdentifier `` => `` unique_identifier``
-    - ``CosmologyCurrentRedshift `` => `` current_redshift``
-    - ``ComovingCoordinates `` => `` cosmological_simulation``
-    - ``CosmologyOmegaMatterNow `` => `` omega_matter``
-    - ``CosmologyOmegaLambdaNow `` => `` omega_lambda``
-    - ``CosmologyHubbleConstantNow `` => `` hubble_constant``
- 
- * Do not assume that the domain runs from 0 .. 1.  This is not true
-   everywhere.
- * Variable names should be short but descriptive.
- * No globals!

diff -r 0a65d74040da3a01991f532b01629c690a0ec5e1 -r 2e1165bb32c4cce84f8977752fe54fc9a34627b1 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -494,4 +494,4 @@
 
 .. _code-style-guide:
 
-.. include:: ../../coding_styleguide.txt
+.. include:: ../../../coding_styleguide.txt


https://bitbucket.org/yt_analysis/yt/commits/b232565b974c/
Changeset:   b232565b974c
Branch:      yt
User:        ngoldbaum
Date:        2015-11-06 20:49:18+00:00
Summary:     Remove old flake8 output files if they exist
Affected #:  1 file

diff -r 2e1165bb32c4cce84f8977752fe54fc9a34627b1 -r b232565b974cb1b79afec463c20312f3db8541a8 yt/tests/test_flake8.py
--- a/yt/tests/test_flake8.py
+++ b/yt/tests/test_flake8.py
@@ -11,6 +11,8 @@
     initial_dir = os.getcwd()
     os.chdir(yt_dir)
     output_file = os.path.sep.join([os.path.dirname(initial_dir), 'flake8.out'])
+    if os.path.exists(output_file):
+        os.remove(output_file)
     output_string = "--output-file=%s" % output_file
     subprocess.call(['flake8', output_string, os.curdir])
     os.chdir(initial_dir)

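An equivalent, race-free form of this remove-if-present idiom (a sketch, not
what the changeset uses) attempts the removal and ignores only a missing file:

    import errno
    import os

    def remove_if_present(path):
        try:
            os.remove(path)
        except OSError as err:
            # Re-raise anything other than "no such file".
            if err.errno != errno.ENOENT:
                raise

    remove_if_present('flake8.out')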

https://bitbucket.org/yt_analysis/yt/commits/c90653eed93b/
Changeset:   c90653eed93b
Branch:      yt
User:        ngoldbaum
Date:        2015-11-07 00:08:16+00:00
Summary:     Remove commented out print statements
Affected #:  1 file

diff -r b232565b974cb1b79afec463c20312f3db8541a8 -r c90653eed93b9a7661c08727d3ef9f67268c2b36 yt/utilities/grid_data_format/conversion/conversion_athena.py
--- a/yt/utilities/grid_data_format/conversion/conversion_athena.py
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -34,7 +34,6 @@
         self.handle = None
 
     def parse_line(self,line, grid):
-        # print line
         # grid is a dictionary
         splitup = line.strip().split()
         if "vtk" in splitup:
@@ -210,7 +209,6 @@
             #print 'Reading data from %s' % fn
             line = f.readline()
             while line is not '':
-                # print line
                 if len(line) == 0: break
                 splitup = line.strip().split()
 
@@ -310,7 +308,6 @@
 
 
     def parse_line(self, line, grid):
-        #    print line
         # grid is a dictionary
         splitup = line.strip().split()
         if "vtk" in splitup:
@@ -353,7 +350,6 @@
                 if 'TABLE' in line.strip().split():
                     table_read = True
                 if len(line) == 0: break
-            #    print line
 
             if len(line) == 0: break
             if np.prod(grid['dimensions']) != grid['ncells']:

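Note that the surrounding context line ``while line is not '':`` compares
object identity rather than string value, contrary to the style guide moved
above; it only works because CPython happens to reuse empty-string objects.
A small illustration, not part of the changeset:

    # Two equal strings need not be the same object in CPython.
    a = 'hello world'
    b = ''.join(['hello', ' world'])   # built at runtime, not interned

    assert a == b        # value equality: the comparison that is meant
    assert a is not b    # identity: True here, so 'is'/'is not' is unreliable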

https://bitbucket.org/yt_analysis/yt/commits/7d3c3b3e270c/
Changeset:   7d3c3b3e270c
Branch:      yt
User:        ngoldbaum
Date:        2015-11-15 15:19:02+00:00
Summary:     Merging with mainline, fixing conflicts
Affected #:  141 files

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -32,6 +32,7 @@
 yt/utilities/lib/CICDeposit.c
 yt/utilities/lib/ContourFinding.c
 yt/utilities/lib/DepthFirstOctree.c
+yt/utilities/lib/element_mappings.c
 yt/utilities/lib/FixedInterpolator.c
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -23,7 +23,7 @@
 DEST_SUFFIX="yt-conda"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="yt" # This is the branch to which we will forcibly update.
-INST_YT_SOURCE=1 # Do we do a source install of yt?
+INST_YT_SOURCE=0 # Do we do a source install of yt?
 
 ##################################################################
 #                                                                #
@@ -37,7 +37,7 @@
 # ( SOMECOMMAND 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
 MINICONDA_URLBASE="http://repo.continuum.io/miniconda"
-MINICONDA_VERSION="1.9.1"
+MINICONDA_VERSION="latest"
 YT_RECIPE_REPO="https://bitbucket.org/yt_analysis/yt_conda/raw/default"
 
 function do_exit
@@ -61,12 +61,14 @@
     ( $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
-function get_ytproject
-{
-    [ -e $1 ] && return
-    echo "Downloading $1 from yt-project.org"
-    ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
-    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
+# These are needed to prevent pushd and popd from printing to stdout
+
+function pushd () {
+    command pushd "$@" > /dev/null
+}
+
+function popd () {
+    command popd "$@" > /dev/null
 }
 
 function get_ytdata
@@ -101,122 +103,125 @@
 echo "This will install Miniconda from Continuum Analytics, the necessary"
 echo "packages to run yt, and create a self-contained environment for you to"
 echo "use yt.  Additionally, Conda itself provides the ability to install"
-echo "many other packages that can be used for other purposes."
+echo "many other packages that can be used for other purposes using the"
+echo "'conda install' command."
 echo
 MYOS=`uname -s`       # A guess at the OS
-if [ "${MYOS##Darwin}" != "${MYOS}" ]
+if [ $INST_YT_SOURCE -ne 0 ]
 then
-  echo "Looks like you're running on Mac OSX."
-  echo
-  echo "NOTE: you must have the Xcode command line tools installed."
-  echo
-  echo "The instructions for obtaining these tools varies according"
-  echo "to your exact OS version.  On older versions of OS X, you"
-  echo "must register for an account on the apple developer tools"
-  echo "website: https://developer.apple.com/downloads to obtain the"
-  echo "download link."
-  echo
-  echo "We have gathered some additional instructions for each"
-  echo "version of OS X below. If you have trouble installing yt"
-  echo "after following these instructions, don't hesitate to contact"
-  echo "the yt user's e-mail list."
-  echo
-  echo "You can see which version of OSX you are running by clicking"
-  echo "'About This Mac' in the apple menu on the left hand side of"
-  echo "menu bar.  We're assuming that you've installed all operating"
-  echo "system updates; if you have an older version, we suggest"
-  echo "running software update and installing all available updates."
-  echo
-  echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
-  echo "Apple developer tools website."
-  echo
-  echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
-  echo "developer tools website.  You can either download the"
-  echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-  echo "Software Update to update to XCode 3.2.6 or"
-  echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
-  echo "bundle (4.1 GB)."
-  echo
-  echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
-  echo "(search for Xcode)."
-  echo "Alternatively, download the Xcode command line tools from"
-  echo "the Apple developer tools website."
-  echo
-  echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
-  echo "(search for Xcode)."
-  echo "Additionally, you will have to manually install the Xcode"
-  echo "command line tools, see:"
-  echo "http://stackoverflow.com/questions/9353444"
-  echo "Alternatively, download the Xcode command line tools from"
-  echo "the Apple developer tools website."
-  echo
-  echo "NOTE: It's possible that the installation will fail, if so,"
-  echo "please set the following environment variables, remove any"
-  echo "broken installation tree, and re-run this script verbatim."
-  echo
-  echo "$ export CC=gcc"
-  echo "$ export CXX=g++"
-  echo
-  MINICONDA_OS="MacOSX-x86_64"
+    if [ "${MYOS##Darwin}" != "${MYOS}" ]
+    then
+        echo "Looks like you're running on Mac OSX."
+        echo
+        echo "NOTE: you must have the Xcode command line tools installed."
+        echo
+        echo "The instructions for obtaining these tools varies according"
+        echo "to your exact OS version.  On older versions of OS X, you"
+        echo "must register for an account on the apple developer tools"
+        echo "website: https://developer.apple.com/downloads to obtain the"
+        echo "download link."
+        echo
+        echo "We have gathered some additional instructions for each"
+        echo "version of OS X below. If you have trouble installing yt"
+        echo "after following these instructions, don't hesitate to contact"
+        echo "the yt user's e-mail list."
+        echo
+        echo "You can see which version of OSX you are running by clicking"
+        echo "'About This Mac' in the apple menu on the left hand side of"
+        echo "menu bar.  We're assuming that you've installed all operating"
+        echo "system updates; if you have an older version, we suggest"
+        echo "running software update and installing all available updates."
+        echo
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
+        echo "Apple developer tools website."
+        echo
+        echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
+        echo "developer tools website.  You can either download the"
+        echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
+        echo "Software Update to update to XCode 3.2.6 or"
+        echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
+        echo "bundle (4.1 GB)."
+        echo
+        echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
+        echo "(search for Xcode)."
+        echo "Alternatively, download the Xcode command line tools from"
+        echo "the Apple developer tools website."
+        echo
+        echo "OS X 10.8.4, 10.9, 10.10, and 10.11:"
+        echo "download the appropriate version of Xcode from the"
+        echo "mac app store (search for Xcode)."
+        echo
+        echo "Additionally, you will have to manually install the Xcode"
+        echo "command line tools."
+        echo
+        echo "For OS X 10.8, see:"
+        echo "http://stackoverflow.com/questions/9353444"
+        echo
+        echo "For OS X 10.9 and newer the command line tools can be installed"
+        echo "with the following command:"
+        echo "    xcode-select --install"
+    fi
+    if [ "${MYOS##Linux}" != "${MYOS}" ]
+    then
+        echo "Looks like you're on Linux."
+        echo
+        echo "Please make sure you have the developer tools for your OS "
+        echo "installed."
+        echo
+        if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
+        then
+            echo "Looks like you're on an OpenSUSE-compatible machine."
+            echo
+            echo "You need to have these packages installed:"
+            echo
+            echo "  * devel_C_C++"
+            echo "  * libuuid-devel"
+            echo "  * gcc-c++"
+            echo "  * chrpath"
+            echo
+            echo "You can accomplish this by executing:"
+            echo
+            echo "$ sudo zypper install -t pattern devel_C_C++"
+            echo "$ sudo zypper install gcc-c++ libuuid-devel zip"
+            echo "$ sudo zypper install chrpath"
+        fi
+        if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
+        then
+            echo "Looks like you're on an Ubuntu-compatible machine."
+            echo
+            echo "You need to have these packages installed:"
+            echo
+            echo "  * libssl-dev"
+            echo "  * build-essential"
+            echo "  * libncurses5"
+            echo "  * libncurses5-dev"
+            echo "  * uuid-dev"
+            echo "  * chrpath"
+            echo
+            echo "You can accomplish this by executing:"
+            echo
+            echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev chrpath"
+            echo
+        fi
+        echo
+        echo "If you are running on a supercomputer or other module-enabled"
+        echo "system, please make sure that the GNU module has been loaded."
+        echo
+    fi
 fi
-if [ "${MYOS##Linux}" != "${MYOS}" ]
+if [ "${MYOS##x86_64}" != "${MYOS}" ]
 then
-  echo "Looks like you're on Linux."
-  echo
-  echo "Please make sure you have the developer tools for your OS installed."
-  echo
-  if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
-  then
-    echo "Looks like you're on an OpenSUSE-compatible machine."
-    echo
-    echo "You need to have these packages installed:"
-    echo
-    echo "  * devel_C_C++"
-    echo "  * libopenssl-devel"
-    echo "  * libuuid-devel"
-    echo "  * zip"
-    echo "  * gcc-c++"
-    echo "  * chrpath"
-    echo
-    echo "You can accomplish this by executing:"
-    echo
-    echo "$ sudo zypper install -t pattern devel_C_C++"
-    echo "$ sudo zypper install gcc-c++ libopenssl-devel libuuid-devel zip"
-    echo "$ sudo zypper install chrpath"
-  fi
-  if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
-  then
-    echo "Looks like you're on an Ubuntu-compatible machine."
-    echo
-    echo "You need to have these packages installed:"
-    echo
-    echo "  * libssl-dev"
-    echo "  * build-essential"
-    echo "  * libncurses5"
-    echo "  * libncurses5-dev"
-    echo "  * zip"
-    echo "  * uuid-dev"
-    echo "  * chrpath"
-    echo
-    echo "You can accomplish this by executing:"
-    echo
-    echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev chrpath"
-    echo
-  fi
-  echo
-  echo "If you are running on a supercomputer or other module-enabled"
-  echo "system, please make sure that the GNU module has been loaded."
-  echo
-  if [ "${MYOS##x86_64}" != "${MYOS}" ]
-  then
     MINICONDA_OS="Linux-x86_64"
-  elif [ "${MYOS##i386}" != "${MYOS}" ]
-  then
+elif [ "${MYOS##i386}" != "${MYOS}" ]
+then
     MINICONDA_OS="Linux-x86"
-  else
-    echo "Not sure which type of Linux you're on.  Going with x86_64."
+elif [ "${MYOS##Darwin}" != "${MYOS}" ]
+then
+    MINICONDA_OS="MacOSX-x86_64"
+else
+    echo "Not sure which Linux distro you are running."
+    echo "Going with x86_64 architecture."
     MINICONDA_OS="Linux-x86_64"
-  fi
 fi
 echo
 echo "If you'd rather not continue, hit Ctrl-C."
@@ -233,7 +238,7 @@
 if type -P wget &>/dev/null
 then
     echo "Using wget"
-    export GETFILE="wget -nv"
+    export GETFILE="wget -nv -nc"
 else
     echo "Using curl"
     export GETFILE="curl -sSO"
@@ -250,9 +255,6 @@
 
 log_cmd bash ./${MINICONDA_PKG} -b -p $DEST_DIR
 
-# I don't think we need OR want this anymore:
-#export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
-
 # This we *do* need.
 export PATH=${DEST_DIR}/bin:$PATH
 
@@ -261,51 +263,40 @@
 
 declare -a YT_DEPS
 YT_DEPS+=('python')
-YT_DEPS+=('distribute')
-YT_DEPS+=('libpng')
+YT_DEPS+=('setuptools')
 YT_DEPS+=('numpy')
-YT_DEPS+=('pygments')
-YT_DEPS+=('jinja2')
-YT_DEPS+=('tornado')
-YT_DEPS+=('pyzmq')
+YT_DEPS+=('jupyter')
 YT_DEPS+=('ipython')
 YT_DEPS+=('sphinx')
 YT_DEPS+=('h5py')
 YT_DEPS+=('matplotlib')
 YT_DEPS+=('cython')
 YT_DEPS+=('nose')
+YT_DEPS+=('conda-build')
+YT_DEPS+=('mercurial')
+YT_DEPS+=('sympy')
 
 # Here is our dependency list for yt
-log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/free
-log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/dev
-log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/gpl
 log_cmd conda update --yes conda
 
-echo "Current dependencies: ${YT_DEPS[@]}"
 log_cmd echo "DEPENDENCIES" ${YT_DEPS[@]}
-log_cmd conda install --yes ${YT_DEPS[@]}
-
-echo "Installing mercurial."
-get_ytrecipe mercurial
+for YT_DEP in "${YT_DEPS[@]}"; do
+    echo "Installing $YT_DEP"
+    log_cmd conda install --yes ${YT_DEP}
+done
 
 if [ $INST_YT_SOURCE -eq 0 ]
 then
-  echo "Installing yt as a package."
-  get_ytrecipe yt
+  echo "Installing yt"
+  log_cmd conda install --yes yt
 else
-  # We do a source install.
-  YT_DIR="${DEST_DIR}/src/yt-hg"
-  export PNG_DIR=${DEST_DIR}
-  export FTYPE_DIR=${DEST_DIR}
-  export HDF5_DIR=${DEST_DIR}
-  log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
-  pushd ${YT_DIR}
-  log_cmd python setup.py develop
-  popd
-  log_cmd cp ${YT_DIR}/doc/activate ${DEST_DIR}/bin/activate 
-  log_cmd sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate
-  log_cmd cp ${YT_DIR}/doc/activate.csh ${DEST_DIR}/bin/activate.csh
-  log_cmd sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate.csh
+    # We do a source install.
+    echo "Installing yt from source"
+    YT_DIR="${DEST_DIR}/src/yt-hg"
+    log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+    pushd ${YT_DIR}
+    log_cmd python setup.py develop
+    popd
 fi
 
 echo
@@ -314,34 +305,26 @@
 echo
 echo "yt and the Conda system are now installed in $DEST_DIR ."
 echo
-if [ $INST_YT_SOURCE -eq 0 ]
-then
-  echo "You must now modify your PATH variable by prepending:"
-  echo 
-  echo "   $DEST_DIR/bin"
-  echo
-  echo "For example, if you use bash, place something like this at the end"
-  echo "of your ~/.bashrc :"
-  echo
-  echo "   export PATH=$DEST_DIR/bin:$PATH"
-else
-  echo "To run from this new installation, use the activate script for this "
-  echo "environment."
-  echo
-  echo "    $ source $DEST_DIR/bin/activate"
-  echo
-  echo "This modifies the environment variables YT_DEST, PATH, PYTHONPATH, and"
-  echo "LD_LIBRARY_PATH to match your new yt install.  If you use csh, just"
-  echo "append .csh to the above."
-fi
+echo "You must now modify your PATH variable by prepending:"
+echo 
+echo "   $DEST_DIR/bin"
+echo
+echo "On Bash-style shells you can copy/paste the following command to "
+echo "temporarily activate the yt installtion:"
+echo
+echo "    export PATH=$DEST_DIR/bin:\$PATH"
+echo
+echo "and on csh-style shells:"
+echo
+echo "    setenv PATH $DEST_DIR/bin:\$PATH"
+echo
+echo "You can also update the init file appropriate for your shell to include"
+echo "the same command."
 echo
 echo "To get started with yt, check out the orientation:"
 echo
 echo "    http://yt-project.org/doc/orientation/"
 echo
-echo "or just activate your environment and run 'yt serve' to bring up the"
-echo "yt GUI."
-echo
 echo "For support, see the website and join the mailing list:"
 echo
 echo "    http://yt-project.org/"

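The per-dependency loop makes a failed package install easy to spot in the
log. The same control flow, rendered in Python for readers following along
outside the shell (a sketch; assumes conda is on PATH):

    import subprocess

    YT_DEPS = ['python', 'setuptools', 'numpy', 'jupyter', 'ipython',
               'sphinx', 'h5py', 'matplotlib', 'cython', 'nose',
               'conda-build', 'mercurial', 'sympy']

    for dep in YT_DEPS:
        # One invocation per package, so any failure names its package.
        print("Installing %s" % dep)
        subprocess.check_call(['conda', 'install', '--yes', dep])
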
diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -233,53 +233,61 @@
         echo
         echo "NOTE: you must have the Xcode command line tools installed."
         echo
-	echo "The instructions for obtaining these tools varies according"
-	echo "to your exact OS version.  On older versions of OS X, you"
-	echo "must register for an account on the apple developer tools"
-	echo "website: https://developer.apple.com/downloads to obtain the"
-	echo "download link."
-	echo
-	echo "We have gathered some additional instructions for each"
-	echo "version of OS X below. If you have trouble installing yt"
-	echo "after following these instructions, don't hesitate to contact"
-	echo "the yt user's e-mail list."
-	echo
-	echo "You can see which version of OSX you are running by clicking"
-	echo "'About This Mac' in the apple menu on the left hand side of"
-	echo "menu bar.  We're assuming that you've installed all operating"
-	echo "system updates; if you have an older version, we suggest"
-	echo "running software update and installing all available updates."
-	echo
+        echo "The instructions for obtaining these tools varies according"
+        echo "to your exact OS version.  On older versions of OS X, you"
+        echo "must register for an account on the apple developer tools"
+        echo "website: https://developer.apple.com/downloads to obtain the"
+        echo "download link."
+        echo
+        echo "We have gathered some additional instructions for each"
+        echo "version of OS X below. If you have trouble installing yt"
+        echo "after following these instructions, don't hesitate to contact"
+        echo "the yt user's e-mail list."
+        echo
+        echo "You can see which version of OSX you are running by clicking"
+        echo "'About This Mac' in the apple menu on the left hand side of"
+        echo "menu bar.  We're assuming that you've installed all operating"
+        echo "system updates; if you have an older version, we suggest"
+        echo "running software update and installing all available updates."
+        echo
         echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
-	echo "Apple developer tools website."
+        echo "Apple developer tools website."
         echo
         echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
-	echo "developer tools website.  You can either download the"
-	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-	echo "Software Update to update to XCode 3.2.6 or"
-	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
-	echo "bundle (4.1 GB)."
+        echo "developer tools website.  You can either download the"
+        echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
+        echo "Software Update to update to XCode 3.2.6 or"
+        echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
+        echo "bundle (4.1 GB)."
         echo
         echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
-	echo "(search for Xcode)."
+        echo "(search for Xcode)."
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.4, 10.9, and 10.10: download the appropriate version of"
-	echo "Xcode from the mac app store (search for Xcode)."
-    echo
-	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools."
-    echo
-    echo "For OS X 10.8, see:"
-   	echo "http://stackoverflow.com/questions/9353444"
-	echo
-    echo "For OS X 10.9 and 10.10, the command line tools can be installed"
-    echo "with the following command:"
-    echo "    xcode-select --install"
-    echo
-    OSX_VERSION=`sw_vers -productVersion`
-    if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
+        echo "OS X 10.8.4, 10.9, 10.10, and 10.11:"
+        echo "download the appropriate version of Xcode from the"
+        echo "mac app store (search for Xcode)."
+        echo
+        echo "Additionally, you will have to manually install the Xcode"
+        echo "command line tools."
+        echo
+        echo "For OS X 10.8, see:"
+        echo "http://stackoverflow.com/questions/9353444"
+        echo
+        echo "For OS X 10.9 and newer the command line tools can be installed"
+        echo "with the following command:"
+        echo "    xcode-select --install"
+        echo
+        echo "For OS X 10.11, you will additionally need to install the OpenSSL"
+        echo "library using a package manager like homebrew or macports."
+        echo "If you install fails with a message like"
+        echo "    ImportError: cannot import HTTPSHandler"
+        echo "then you do not have the OpenSSL headers available in a location"
+        echo "visible to your C compiler. Consider installing yt using the"
+        echo "get_yt.sh script instead, as that bundles OpenSSL."
+        OSX_VERSION=`sw_vers -productVersion`
+        if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
         then
             MPL_SUPP_CFLAGS="${MPL_SUPP_CFLAGS} -mmacosx-version-min=10.7"
             MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
@@ -358,17 +366,17 @@
     fi
     if [ $INST_SCIPY -eq 1 ]
     then
-	echo
-	echo "Looks like you've requested that the install script build SciPy."
-	echo
-	echo "If the SciPy build fails, please uncomment one of the the lines"
-	echo "at the top of the install script that sets NUMPY_ARGS, delete"
-	echo "any broken installation tree, and re-run the install script"
-	echo "verbatim."
-	echo
-	echo "If that doesn't work, don't hesitate to ask for help on the yt"
-	echo "user's mailing list."
-	echo
+    echo
+    echo "Looks like you've requested that the install script build SciPy."
+    echo
+    echo "If the SciPy build fails, please uncomment one of the the lines"
+    echo "at the top of the install script that sets NUMPY_ARGS, delete"
+    echo "any broken installation tree, and re-run the install script"
+    echo "verbatim."
+    echo
+    echo "If that doesn't work, don't hesitate to ask for help on the yt"
+    echo "user's mailing list."
+    echo
     fi
     if [ ! -z "${CFLAGS}" ]
     then
@@ -490,9 +498,9 @@
 
 if [ $INST_PY3 -eq 1 ]
 then
-	 PYTHON_EXEC='python3.4'
+     PYTHON_EXEC='python3.4'
 else 
-	 PYTHON_EXEC='python2.7'
+     PYTHON_EXEC='python2.7'
 fi
 
 function do_setup_py
@@ -899,28 +907,28 @@
 else
     if [ ! -e $SCIPY/done ]
     then
-	if [ ! -e BLAS/done ]
-	then
-	    tar xfz blas.tar.gz
-	    echo "Building BLAS"
-	    cd BLAS
-	    gfortran -O2 -fPIC -fno-second-underscore -c *.f
-	    ( ar r libfblas.a *.o 2>&1 ) 1>> ${LOG_FILE}
-	    ( ranlib libfblas.a 2>&1 ) 1>> ${LOG_FILE}
-	    rm -rf *.o
-	    touch done
-	    cd ..
-	fi
-	if [ ! -e $LAPACK/done ]
-	then
-	    tar xfz $LAPACK.tar.gz
-	    echo "Building LAPACK"
-	    cd $LAPACK/
-	    cp INSTALL/make.inc.gfortran make.inc
-	    ( make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 2>&1 ) 1>> ${LOG_FILE} || do_exit
-	    touch done
-	    cd ..
-	fi
+    if [ ! -e BLAS/done ]
+    then
+        tar xfz blas.tar.gz
+        echo "Building BLAS"
+        cd BLAS
+        gfortran -O2 -fPIC -fno-second-underscore -c *.f
+        ( ar r libfblas.a *.o 2>&1 ) 1>> ${LOG_FILE}
+        ( ranlib libfblas.a 2>&1 ) 1>> ${LOG_FILE}
+        rm -rf *.o
+        touch done
+        cd ..
+    fi
+    if [ ! -e $LAPACK/done ]
+    then
+        tar xfz $LAPACK.tar.gz
+        echo "Building LAPACK"
+        cd $LAPACK/
+        cp INSTALL/make.inc.gfortran make.inc
+        ( make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        touch done
+        cd ..
+    fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
     export LAPACK=$PWD/$LAPACK/liblapack.a
@@ -1030,7 +1038,7 @@
 cd $MY_PWD
 
 if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import readline" 2>&1 )>> ${LOG_FILE}) || \
-	[[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]] 
+    [[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]] 
 then
     if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
     then

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -1,3 +1,5 @@
+.. _photon_simulator:
+
 Constructing Mock X-ray Observations
 ------------------------------------
 
@@ -98,9 +100,8 @@
    `AtomDB <http://www.atomdb.org>`_ and get the files from the
    `xray_data <http://yt-project.org/data/xray_data.tar.gz>`_ auxiliary
    data package (see the ``xray_data`` `README <xray_data_README.html>`_ 
-   for details on the latter). Make sure that
-   in what follows you specify the full path to the locations of these
-   files.
+   for details on the latter). Make sure that in what follows you 
+   specify the full path to the locations of these files.
 
 To generate photons from this dataset, we have several different things
 we need to set up. The first is a standard yt data object. It could
@@ -197,7 +198,7 @@
 
 .. code:: python
 
-    A = 6000.
+    A = 3000.
     exp_time = 4.0e5
     redshift = 0.05
     cosmo = Cosmology()
@@ -298,7 +299,7 @@
 
 The second option, ``TableAbsorbModel``, takes as input an HDF5 file
 containing two datasets, ``"energy"`` (in keV), and ``"cross_section"``
-(in cm2), and the Galactic column density :math:`N_H`:
+(in :math:`cm^2`), and the Galactic column density :math:`N_H`:
 
 .. code:: python
 
@@ -307,7 +308,7 @@
 Now we're ready to project the photons. First, we choose a line-of-sight
 vector ``normal``. Second, we'll adjust the exposure time and the redshift.
 Third, we'll pass in the absorption ``SpectrumModel``. Fourth, we'll
-specify a ``sky_center`` in RA,DEC on the sky in degrees.
+specify a ``sky_center`` in RA and DEC on the sky in degrees.
 
 Also, we're going to convolve the photons with instrument ``responses``.
 For this, you need a ARF/RMF pair with matching energy bins. This is of
@@ -322,8 +323,8 @@
 
 .. code:: python
 
-    ARF = "chandra_ACIS-S3_onaxis_arf.fits"
-    RMF = "chandra_ACIS-S3_onaxis_rmf.fits"
+    ARF = "acisi_aimpt_cy17.arf"
+    RMF = "acisi_aimpt_cy17.rmf"
     normal = [0.0,0.0,1.0]
     events = photons.project_photons(normal, exp_time_new=2.0e5, redshift_new=0.07, dist_new=None, 
                                      absorb_model=abs_model, sky_center=(187.5,12.333), responses=[ARF,RMF], 
@@ -540,7 +541,7 @@
 
    sphere = ds.sphere("c", (1.0,"Mpc"))
        
-   A = 6000.
+   A = 3000.
    exp_time = 2.0e5
    redshift = 0.05
    cosmo = Cosmology()
@@ -555,7 +556,8 @@
 
 
    events = photons.project_photons([0.0,0.0,1.0], 
-                                    responses=["sim_arf.fits","sim_rmf.fits"], 
+                                    responses=["acisi_aimpt_cy17.arf",
+                                               "acisi_aimpt_cy17.rmf"], 
                                     absorb_model=abs_model,
                                     north_vector=[0.0,1.0,0.0])
 

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -4,6 +4,13 @@
 # In this example we will show how to use the AMRKDTree to take a simulation
 # with 8 levels of refinement and only use levels 0-3 to render the dataset.
 
+# Currently this cookbook is flawed in that the data that is covered by the
+# higher resolution data gets masked during the rendering.  This should be
+# fixed by changing either the data source or the code in
+# yt/utilities/amr_kdtree.py where data is being masked for the partitioned
+# grid.  Right now the quick fix is to create a data_collection, but this
+# will only work for patch based simulations that have ds.index.grids.
+
 # We begin by loading up yt, and importing the AMRKDTree
 import numpy as np
 
@@ -12,58 +19,58 @@
 
 # Load up a dataset and define the kdtree
 ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-kd = AMRKDTree(ds)
+im, sc = yt.volume_render(ds, 'density', fname='v0.png')
+sc.camera.set_width(ds.arr(100, 'kpc'))
+render_source = sc.get_source(0)
+kd = render_source.volume
 
 # Print out specifics of KD Tree
 print("Total volume of all bricks = %i" % kd.count_volume())
 print("Total number of cells = %i" % kd.count_cells())
 
-# Define a camera and take an volume rendering.
-tf = yt.ColorTransferFunction((-30, -22))
-cam = ds.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
-                  tf, volume=kd)
-tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5], colormap='RdBu_r')
-cam.snapshot("v1.png", clip_ratio=6.0)
-
-# This rendering is okay, but lets say I'd like to improve it, and I don't want
-# to spend the time rendering the high resolution data.  What we can do is
-# generate a low resolution version of the AMRKDTree and pass that in to the
-# camera.  We do this by specifying a maximum refinement level of 6.
-
-kd_low_res = AMRKDTree(ds, max_level=6)
+new_source = ds.all_data()
+new_source.max_level = 3
+kd_low_res = AMRKDTree(ds, data_source=new_source)
 print(kd_low_res.count_volume())
 print(kd_low_res.count_cells())
 
 # Now we pass this in as the volume to our camera, and render the snapshot
 # again.
 
-cam.volume = kd_low_res
-cam.snapshot("v4.png", clip_ratio=6.0)
+render_source.set_volume(kd_low_res)
+render_source.set_fields('density')
+sc.render()
+sc.save("v1.png", sigma_clip=6.0)
 
 # This operation was substantially faster.  Now let's modify the low resolution
 # rendering until we find something we like.
 
+tf = render_source.transfer_function
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=np.ones(4, dtype='float64'), colormap='RdBu_r')
-cam.snapshot("v2.png", clip_ratio=6.0)
+sc.render()
+sc.save("v2.png", sigma_clip=6.0)
 
 # This looks better.  Now let's try turning on opacity.
 
 tf.grey_opacity = True
-cam.snapshot("v4.png", clip_ratio=6.0)
-
-# That seemed to pick out som interesting structures.  Now let's bump up the
-# opacity.
-
+sc.render()
+sc.save("v3.png", sigma_clip=6.0)
+#
+## That seemed to pick out some interesting structures.  Now let's bump up the
+## opacity.
+#
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=10.0 * np.ones(4, dtype='float64'), colormap='RdBu_r')
-cam.snapshot("v3.png", clip_ratio=6.0)
-
-# This looks pretty good, now lets go back to the full resolution AMRKDTree
-
-cam.volume = kd
-cam.snapshot("v4.png", clip_ratio=6.0)
+sc.render()
+sc.save("v4.png", sigma_clip=6.0)
+#
+## This looks pretty good, now lets go back to the full resolution AMRKDTree
+#
+render_source.set_volume(kd)
+sc.render()
+sc.save("v5.png", sigma_clip=6.0)
 
 # This looks great!

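The updated recipe's core pattern, condensed (a sketch assuming the
IsolatedGalaxy sample dataset is available locally): restrict the kd-tree
through a data source's max_level attribute rather than a keyword on
AMRKDTree itself.

    import yt
    from yt.utilities.amr_kdtree.api import AMRKDTree

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')

    # Build a data source capped at refinement level 3 and hand it to the tree.
    low_res = ds.all_data()
    low_res.max_level = 3
    kd_low_res = AMRKDTree(ds, data_source=low_res)

    print(kd_low_res.count_volume())
    print(kd_low_res.count_cells())
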
diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -3,40 +3,29 @@
 
 # Follow the simple_volume_rendering cookbook for the first part of this.
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # load data
-ad = ds.all_data()
-mi, ma = ad.quantities.extrema("density")
-
-# Set up transfer function
-tf = yt.ColorTransferFunction((np.log10(mi), np.log10(ma)))
-tf.add_layers(6, w=0.05)
-
-# Set up camera paramters
-c = [0.5, 0.5, 0.5]  # Center
-L = [1, 1, 1]  # Normal Vector
-W = 1.0  # Width
-Nvec = 512  # Pixels on a side
-
-# Specify a north vector, which helps with rotations.
-north_vector = [0., 0., 1.]
+sc = yt.create_scene(ds)
+cam = sc.camera
+cam.resolution = (512, 512)
+cam.set_width(ds.domain_width/20.0)
 
 # Find the maximum density location, store it in max_c
 v, max_c = ds.find_max('density')
 
-# Initialize the Camera
-cam = ds.camera(c, L, W, (Nvec, Nvec), tf, north_vector=north_vector)
 frame = 0
-
-# Do a rotation over 5 frames
-for i, snapshot in enumerate(cam.rotation(np.pi, 5, clip_ratio=8.0)):
-    snapshot.write_png('camera_movement_%04i.png' % frame)
-    frame += 1
-
 # Move to the maximum density location over 5 frames
-for i, snapshot in enumerate(cam.move_to(max_c, 5, clip_ratio=8.0)):
-    snapshot.write_png('camera_movement_%04i.png' % frame)
+for _ in cam.iter_move(max_c, 5):
+    sc.render()
+    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
     frame += 1
 
 # Zoom in by a factor of 10 over 5 frames
-for i, snapshot in enumerate(cam.zoomin(10.0, 5, clip_ratio=8.0)):
-    snapshot.write_png('camera_movement_%04i.png' % frame)
+for _ in cam.iter_zoom(10.0, 5):
+    sc.render()
+    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
     frame += 1
+
+# Do a rotation over 5 frames
+for _ in cam.iter_rotate(np.pi, 5):
+    sc.render()
+    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    frame += 1

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -196,10 +196,41 @@
 
 In this recipe, we move a camera through a domain and take multiple volume
 rendering snapshots.
-See :ref:`volume_rendering` for more information.
+See :ref:`camera_movement` for more information.
 
 .. yt_cookbook:: camera_movement.py
 
+Volume Rendering with Custom Camera
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this recipe we modify the :ref:`cookbook-simple_volume_rendering` recipe to
+use customized camera properties. See :ref:`volume_rendering` for more
+information.
+
+.. yt_cookbook:: custom_camera_volume_rendering.py
+
+.. _cookbook-custom-transfer-function:
+
+Volume Rendering with a Custom Transfer Function
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this recipe we modify the :ref:`cookbook-simple_volume_rendering` recipe to
+use a customized transfer function. See :ref:`volume_rendering` for more
+information.
+
+.. yt_cookbook:: custom_transfer_function_volume_rendering.py
+
+.. _cookbook-sigma_clip:
+
+Volume Rendering with Sigma Clipping
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this recipe we output several images with different values of sigma_clip
+set in order to change the contrast of the resulting image.  See 
+:ref:`sigma_clip` for more information.
+
+.. yt_cookbook:: sigma_clip.py
+
 Zooming into an Image
 ~~~~~~~~~~~~~~~~~~~~~
 
@@ -212,6 +243,15 @@
 
 .. yt_cookbook:: zoomin_frames.py
 
+.. _cookbook-various_lens:
+
+Various Lens Types for Volume Rendering
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This example illustrates the usage and features of different lenses for volume rendering.
+
+.. yt_cookbook:: various_lens.py
+
 .. _cookbook-opaque_rendering:
 
 Opaque Volume Rendering
@@ -220,7 +260,7 @@
 This recipe demonstrates how to make semi-opaque volume renderings, but also
 how to step through and try different things to identify the type of volume
 rendering you want.
-See :ref:`volume_rendering` for more information.
+See :ref:`opaque_rendering` for more information.
 
 .. yt_cookbook:: opaque_rendering.py
 
@@ -235,23 +275,27 @@
 
 .. yt_cookbook:: amrkdtree_downsampling.py
 
+.. _cookbook-volume_rendering_annotations:
+
 Volume Rendering with Bounding Box and Overlaid Grids
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 This recipe demonstrates how to overplot a bounding box on a volume rendering
 as well as overplotting grids representing the level of refinement achieved
 in different regions of the code.
-See :ref:`volume_rendering` for more information.
+See :ref:`volume_rendering_annotations` for more information.
 
 .. yt_cookbook:: rendering_with_box_and_grids.py
 
 Volume Rendering with Annotation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 This recipe demonstrates how to write the simulation time, show an
 axis triad indicating the direction of the coordinate system, and show
-the transfer function on a volume rendering.
-See :ref:`volume_rendering` for more information.
+the transfer function on a volume rendering.  Please note that this 
+recipe relies on the old volume rendering interface.  While one can
+continue to use this interface, it may be incompatible with some of the
+new developments and the infrastructure described in :ref:`volume_rendering`.
 
 .. yt_cookbook:: vol-annotated.py
 

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/cookbook/custom_camera_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/custom_camera_volume_rendering.py
@@ -0,0 +1,22 @@
+import yt
+
+# Load the dataset
+ds = yt.load("Enzo_64/DD0043/data0043")
+
+# Create a volume rendering
+sc = yt.create_scene(ds, field=('gas', 'density'))
+
+# Now increase the resolution
+sc.camera.resolution = (1024, 1024)
+
+# Set the camera focus to a position that is offset from the center of
+# the domain
+sc.camera.focus = ds.arr([0.3, 0.3, 0.3], 'unitary')
+
+# Move the camera position to the other side of the dataset
+sc.camera.position = ds.arr([0, 0, 0], 'unitary')
+
+# save to disk with a custom filename and apply sigma clipping to eliminate
+# very bright pixels, producing an image with better contrast.
+sc.render()
+sc.save('custom.png', sigma_clip=4)

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/cookbook/custom_transfer_function_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/custom_transfer_function_volume_rendering.py
@@ -0,0 +1,24 @@
+import yt
+import numpy as np
+
+# Load the dataset
+ds = yt.load("Enzo_64/DD0043/data0043")
+
+# Create a volume rendering
+sc = yt.create_scene(ds, field=('gas', 'density'))
+
+# Modify the transfer function
+
+# First get the render source, in this case the entire domain, with field ('gas','density')
+render_source = sc.get_source(0)
+
+# Clear the transfer function
+render_source.transfer_function.clear()
+
+# Map a range of density values (in log space) to the Reds_r colormap
+render_source.transfer_function.map_to_colormap(
+    np.log10(ds.quan(5.0e-31, 'g/cm**3')),
+    np.log10(ds.quan(1.0e-29, 'g/cm**3')),
+    scale=30.0, colormap='RdBu_r')
+
+sc.save('new_tf.png')

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/cookbook/image_background_colors.py
--- a/doc/source/cookbook/image_background_colors.py
+++ b/doc/source/cookbook/image_background_colors.py
@@ -2,27 +2,14 @@
 # volume renderings, to pngs with varying backgrounds.
 
 # First we use the simple_volume_rendering.py recipe from above to generate
-# a standard volume rendering.  The only difference is that we use 
-# grey_opacity=True with our TransferFunction, as the colored background 
-# functionality requires images with an opacity between 0 and 1. 
-
-# We have removed all the comments from the volume rendering recipe for 
-# brevity here, but consult the recipe for more details.
+# a standard volume rendering.
 
 import yt
 import numpy as np
 
 ds = yt.load("Enzo_64/DD0043/data0043")
-ad = ds.all_data()
-mi, ma = ad.quantities.extrema("density")
-tf = yt.ColorTransferFunction((np.log10(mi)+1, np.log10(ma)), grey_opacity=True)
-tf.add_layers(5, w=0.02, colormap="spectral")
-c = [0.5, 0.5, 0.5]
-L = [0.5, 0.2, 0.7]
-W = 1.0
-Npixels = 512
-cam = ds.camera(c, L, W, Npixels, tf)
-im = cam.snapshot("original.png" % ds, clip_ratio=8.0)
+im, sc = yt.volume_render(ds, 'density')
+im.write_png("original.png", sigma_clip=8.0)
 
 # Our image array can now be transformed to include different background
 # colors.  By default, the background color is black.  The following
@@ -35,10 +22,10 @@
 # None  (0.,0.,0.,0.) <-- Transparent!
 # any rgba list/array: [r,g,b,a], bounded by 0..1
 
-# We include the clip_ratio=8 keyword here to bring out more contrast between
+# We include the sigma_clip=8 keyword here to bring out more contrast between
 # the background and foreground, but it is entirely optional.
 
-im.write_png('black_bg.png', background='black', clip_ratio=8.0)
-im.write_png('white_bg.png', background='white', clip_ratio=8.0)
-im.write_png('green_bg.png', background=[0.,1.,0.,1.], clip_ratio=8.0)
-im.write_png('transparent_bg.png', background=None, clip_ratio=8.0)
+im.write_png('black_bg.png', background='black', sigma_clip=8.0)
+im.write_png('white_bg.png', background='white', sigma_clip=8.0)
+im.write_png('green_bg.png', background=[0.,1.,0.,1.], sigma_clip=8.0)
+im.write_png('transparent_bg.png', background=None, sigma_clip=8.0)

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -44,8 +44,10 @@
    embedded_webm_animation
    gadget_notebook
    owls_notebook
+   ../visualizing/transfer_function_helper
    ../analyzing/analysis_modules/sunyaev_zeldovich
    fits_radio_cubes
    fits_xray_images
    tipsy_notebook
    halo_analysis_example
+   ../visualizing/volume_rendering_tutorial

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/cookbook/offaxis_projection.py
--- a/doc/source/cookbook/offaxis_projection.py
+++ b/doc/source/cookbook/offaxis_projection.py
@@ -11,7 +11,7 @@
 # objects, you could set it the way you would a cutting plane -- but for this
 # dataset, we'll just choose an off-axis value at random.  This gets normalized
 # automatically.
-L = [0.5, 0.4, 0.7]
+L = [1.0, 0.0, 0.0]
 
 # Our "width" is the width of the image plane as well as the depth.
 # The first element is the left to right width, the second is the
@@ -26,7 +26,7 @@
 # Create the off axis projection.
 # Setting no_ghost to False speeds up the process, but makes a
 # slighly lower quality image.
-image = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
+image, sc = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
 
 # Write out the final image and give it a name
 # relating to what our dataset is called.

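yt.off_axis_projection now returns an (image, scene) pair, so older scripts
that assigned a single return value need the extra unpack. A minimal consumer
of the new return signature (a sketch; the IsolatedGalaxy sample dataset is
assumed):

    import numpy as np
    import yt

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    c = [0.5, 0.5, 0.5]          # center of the image plane
    L = [1.0, 0.0, 0.0]          # line-of-sight vector
    W = 1.0                      # width/depth of the image plane
    Npixels = 512

    # Unpack both values; the scene can be ignored if only the image matters.
    image, sc = yt.off_axis_projection(ds, c, L, W, Npixels, 'density',
                                       no_ghost=False)
    yt.write_image(np.log10(image), '%s_offaxis_projection.png' % ds)
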
diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/cookbook/offaxis_projection_colorbar.py
--- a/doc/source/cookbook/offaxis_projection_colorbar.py
+++ b/doc/source/cookbook/offaxis_projection_colorbar.py
@@ -32,7 +32,7 @@
 # Also note that we set the field which we want to project as "density", but
 # really we could use any arbitrary field like "temperature", "metallicity"
 # or whatever.
-image = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
+image, sc = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
 
 # Image is now an NxN array representing the intensities of the various pixels.
 # And now, we call our direct image saver.  We save the log of the result.

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -3,44 +3,51 @@
 
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-# We start by building a transfer function, and initializing a camera.
+# We start by building a default volume rendering scene 
 
-tf = yt.ColorTransferFunction((-30, -22))
-cam = ds.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256, tf)
+im, sc = yt.volume_render(ds, field=("gas","density"), fname="v0.png", sigma_clip=6.0)
 
-# Now let's add some isocontours, and take a snapshot.
-
-tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5], colormap = 'RdBu_r')
-cam.snapshot("v1.png", clip_ratio=6.0)
+sc.camera.set_width(ds.arr(0.1,'code_length'))
+tf = sc.get_source(0).transfer_function 
+tf.clear()
+tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
+        alpha=np.logspace(-3,0,4), colormap = 'RdBu_r')
+sc.render()
+sc.save("v1.png", sigma_clip=6.0)
 
 # In this case, the default alphas used (np.logspace(-3,0,Nbins)) do not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
 # alpha values for each contour to go between 0.1 and 1.0
 
+tf = sc.get_source(0).transfer_function 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(0,0,4), colormap = 'RdBu_r')
-cam.snapshot("v2.png", clip_ratio=6.0)
+sc.render()
+sc.save("v2.png", sigma_clip=6.0)
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
 # start to be obscured.
 
 tf.grey_opacity = True
-cam.snapshot("v3.png", clip_ratio=6.0)
+sc.render()
+sc.save("v3.png", sigma_clip=6.0)
 
 # That looks pretty good, but let's start bumping up the opacity.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.snapshot("v4.png", clip_ratio=6.0)
+sc.render()
+sc.save("v4.png", sigma_clip=6.0)
 
 # Let's bump up again to see if we can obscure the inner contour.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=30.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.snapshot("v5.png", clip_ratio=6.0)
+sc.render()
+sc.save("v5.png", sigma_clip=6.0)
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
 # layer
@@ -48,13 +55,15 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=100.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.snapshot("v6.png", clip_ratio=6.0)
+sc.render()
+sc.save("v6.png", sigma_clip=6.0)
 
 # That is very opaque!  Now let's go back and see what it would look like with
 # grey_opacity = False
 
 tf.grey_opacity=False
-cam.snapshot("v7.png", clip_ratio=6.0)
+sc.render()
+sc.save("v7.png", sigma_clip=6.0)
 
 # That looks pretty different, but the main thing is that you can see that the
 # inner contours are somewhat visible again.  

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -1,61 +1,22 @@
 import yt
 import numpy as np
+from yt.visualization.volume_rendering.api import BoxSource, CoordinateVectorSource
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
+sc = yt.create_scene(ds, ('gas','density'))
+sc.get_source(0).transfer_function.grey_opacity=True
 
-# Create a data container (like a sphere or region) that
-# represents the entire domain.
-ad = ds.all_data()
+sc.annotate_domain(ds)
+sc.render()
+sc.save("%s_vr_domain.png" % ds)
 
-# Get the minimum and maximum densities.
-mi, ma = ad.quantities.extrema("density")
-
-# Create a transfer function to map field values to colors.
-# We bump up our minimum to cut out some of the background fluid
-tf = yt.ColorTransferFunction((np.log10(mi)+2.0, np.log10(ma)))
-
-# Add three Gaussians, evenly spaced between the min and
-# max specified above with widths of 0.02 and using the
-# gist_stern colormap.
-tf.add_layers(3, w=0.02, colormap="gist_stern")
-
-# Choose a center for the render.
-c = [0.5, 0.5, 0.5]
-
-# Choose a vector representing the viewing direction.
-L = [0.5, 0.2, 0.7]
-
-# Set the width of the image.
-# Decreasing or increasing this value
-# results in a zoom in or out.
-W = 1.0
-
-# The number of pixels along one side of the image.
-# The final image will have Npixel^2 pixels.
-Npixels = 512
-
-# Create a camera object.
-# This object creates the images and
-# can be moved and rotated.
-cam = ds.camera(c, L, W, Npixels, tf)
-
-# Create a snapshot.
-# The return value of this function could also be accepted, modified (or saved
-# for later manipulation) and then put written out using write_bitmap.
-# clip_ratio applies a maximum to the function, which is set to that value
-# times the .std() of the array.
-im = cam.snapshot("%s_volume_rendered.png" % ds, clip_ratio=8.0)
-
-# Add the domain edges, with an alpha blending of 0.3:
-nim = cam.draw_domain(im, alpha=0.3)
-nim.write_png('%s_vr_domain.png' % ds)
-
-# Add the grids, colored by the grid level with the algae colormap
-nim = cam.draw_grids(im, alpha=0.3, cmap='algae')
-nim.write_png('%s_vr_grids.png' % ds)
+sc.annotate_grids(ds)
+sc.render()
+sc.save("%s_vr_grids.png" % ds)
 
 # Here we can draw the coordinate vectors on top of the image by processing
 # it through the camera. Then save it out.
-cam.draw_coordinate_vectors(nim)
-nim.write_png("%s_vr_vectors.png" % ds)
+sc.annotate_axes()
+sc.render()
+sc.save("%s_vr_coords.png" % ds)

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/cookbook/sigma_clip.py
--- /dev/null
+++ b/doc/source/cookbook/sigma_clip.py
@@ -0,0 +1,17 @@
+import yt
+
+# Load the dataset.
+ds = yt.load("enzo_tiny_cosmology/RD0009/RD0009")
+
+# Create a volume rendering, which will determine data bounds, use the first
+# acceptable field in the field_list, and set up a default transfer function.
+
+# Render and save output images with different levels of sigma clipping.
+# Sigma clipping caps the highest-intensity pixels in a volume rendering,
+# which affects the overall contrast of the image.
+sc = yt.create_scene(ds, field=('gas', 'density'))
+sc.render()
+sc.save('clip_0.png')
+sc.save('clip_2.png', sigma_clip=2)
+sc.save('clip_4.png', sigma_clip=4)
+sc.save('clip_6.png', sigma_clip=6)

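The rule sigma_clip applies is spelled out in the quickstart notebook changed
further down: the image's upper bound is clipped at sigma_clip times the
standard deviation of the image values. A minimal numpy sketch of that rule
(illustrative only; yt applies it internally when saving):

    import numpy as np

    im = np.random.lognormal(size=(256, 256))  # stand-in for a rendered image
    sigma_clip = 4.0
    # Cap the brightest pixels; smaller sigma_clip clips more aggressively,
    # raising the overall contrast of the saved image.
    clipped = np.minimum(im, sigma_clip * im.std())
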
diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/cookbook/simple_volume_rendering.py
--- a/doc/source/cookbook/simple_volume_rendering.py
+++ b/doc/source/cookbook/simple_volume_rendering.py
@@ -1,48 +1,10 @@
 import yt
-import numpy as np
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
 
-# Create a data container (like a sphere or region) that
-# represents the entire domain.
-ad = ds.all_data()
+# Create a volume rendering, which will determine data bounds, use the first
+# acceptable field in the field_list, and set up a default transfer function.
 
-# Get the minimum and maximum densities.
-mi, ma = ad.quantities.extrema("density")
-
-# Create a transfer function to map field values to colors.
-# We bump up our minimum to cut out some of the background fluid
-tf = yt.ColorTransferFunction((np.log10(mi)+1, np.log10(ma)))
-
-# Add five Gaussians, evenly spaced between the min and
-# max specified above with widths of 0.02 and using the
-# spectral colormap.
-tf.add_layers(5, w=0.02, colormap="spectral")
-
-# Choose a center for the render.
-c = [0.5, 0.5, 0.5]
-
-# Choose a vector representing the viewing direction.
-L = [0.5, 0.2, 0.7]
-
-# Set the width of the image.
-# Decreasing or increasing this value
-# results in a zoom in or out.
-W = 1.0
-
-# The number of pixels along one side of the image.
-# The final image will have Npixel^2 pixels.
-Npixels = 512
-
-# Create a camera object.
-# This object creates the images and
-# can be moved and rotated.
-cam = ds.camera(c, L, W, Npixels, tf)
-
-# Create a snapshot.
-# The return value of this function could also be accepted, modified (or saved
-# for later manipulation) and then put written out using write_bitmap.
-# clip_ratio applies a maximum to the function, which is set to that value
-# times the .std() of the array.
-cam.snapshot("%s_volume_rendered.png" % ds, clip_ratio=8.0)
+# This will save a file named 'data0043_Render_density.png' to disk.
+im, sc = yt.volume_render(ds, field=('gas', 'density'))

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/cookbook/various_lens.py
--- /dev/null
+++ b/doc/source/cookbook/various_lens.py
@@ -0,0 +1,120 @@
+import yt
+from yt.visualization.volume_rendering.api import Scene, Camera, VolumeSource
+import numpy as np
+
+field = ("gas", "density")
+
+# normal_vector points from the camera to the center of the final projection.
+# Now we look at the positive x direction.
+normal_vector = [1., 0., 0.]
+# north_vector defines the "top" direction of the projection, which is
+# positive z direction here.
+north_vector = [0., 0., 1.]
+
+# Follow the simple_volume_rendering cookbook for the first part of this.
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+sc = Scene()
+vol = VolumeSource(ds, field=field)
+tf = vol.transfer_function
+tf.grey_opacity = True
+
+# Plane-parallel lens
+cam = Camera(ds, lens_type='plane-parallel')
+# Set the resolution of the final projection.
+cam.resolution = [250, 250]
+# Set the location of the camera to be (x=0.2, y=0.5, z=0.5)
+# For the plane-parallel lens, the position along the normal_vector (here,
+# x=0.2) is ignored.
+cam.position = ds.arr(np.array([0.2, 0.5, 0.5]), 'code_length')
+# Set the orientation of the camera.
+cam.switch_orientation(normal_vector=normal_vector,
+                       north_vector=north_vector)
+# Set the width of the camera, where width[0] and width[1] specify the length and
+# height of the final projection, while width[2] is not used by the plane-parallel lens.
+cam.set_width(ds.domain_width * 0.5)
+sc.camera = cam
+sc.add_source(vol)
+sc.render()
+sc.save('lens_plane-parallel.png', sigma_clip=6.0)
+
+# Perspective lens
+cam = Camera(ds, lens_type='perspective')
+cam.resolution = [250, 250]
+# Standing at (x=0.2, y=0.5, z=0.5), we look at the region x > 0.2 (with an opening
+# angle set by the camera width) along the positive x direction.
+cam.position = ds.arr([0.2, 0.5, 0.5], 'code_length')
+cam.switch_orientation(normal_vector=normal_vector,
+                       north_vector=north_vector)
+# Set the width of the camera, where width[0] and width[1] specify the length and
+# height of the final projection, while width[2] specifies the distance between the
+# camera and the final image.
+cam.set_width(ds.domain_width * 0.5)
+sc.camera = cam
+sc.add_source(vol)
+sc.render()
+sc.save('lens_perspective.png', sigma_clip=6.0)
+
+# Stereo-perspective lens
+cam = Camera(ds, lens_type='stereo-perspective')
+# Set the size ratio of the final projection to 2:1, since the stereo-perspective
+# lens generates the final image with the left-eye and right-eye views joined together.
+cam.resolution = [500, 250]
+cam.position = ds.arr([0.2, 0.5, 0.5], 'code_length')
+cam.switch_orientation(normal_vector=normal_vector,
+                       north_vector=north_vector)
+cam.set_width(ds.domain_width*0.5)
+# Set the distance between left-eye and right-eye.
+cam.lens.disparity = ds.domain_width[0] * 1.e-3
+sc.camera = cam
+sc.add_source(vol)
+sc.render()
+sc.save('lens_stereo-perspective.png', sigma_clip=6.0)
+
+# Fisheye lens
+dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
+cam = Camera(dd, lens_type='fisheye')
+cam.resolution = [250, 250]
+v, c = ds.find_max(field)
+cam.set_position(c - 0.0005 * ds.domain_width)
+cam.switch_orientation(normal_vector=normal_vector,
+                       north_vector=north_vector)
+cam.set_width(ds.domain_width)
+cam.lens.fov = 360.0
+sc.camera = cam
+sc.add_source(vol)
+sc.render()
+sc.save('lens_fisheye.png', sigma_clip=6.0)
+
+# Spherical lens
+cam = Camera(ds, lens_type='spherical')
+# Set the size ratio of the final projection to 2:1, since the spherical lens
+# generates a final image spanning 2*pi in length and pi in height.
+cam.resolution = [500, 250]
+# Standing at (x=0.4, y=0.5, z=0.5), we look in all the radial directions
+# from this point in spherical coordinates.
+cam.position = ds.arr([0.4, 0.5, 0.5], 'code_length')
+cam.switch_orientation(normal_vector=normal_vector,
+                       north_vector=north_vector)
+# In the (stereo-)spherical camera, the camera width is not used, since the
+# entire volume is rendered.
+sc.camera = cam
+sc.add_source(vol)
+sc.render()
+sc.save('lens_spherical.png', sigma_clip=6.0)
+
+# Stereo-spherical lens
+cam = Camera(ds, lens_type='stereo-spherical')
+# Set the size ratio of the final projection to 4:1, since the stereo-spherical
+# lens generates the final image with the left-eye and right-eye views joined together.
+cam.resolution = [1000, 250]
+cam.position = ds.arr([0.4, 0.5, 0.5], 'code_length')
+cam.switch_orientation(normal_vector=normal_vector,
+                       north_vector=north_vector)
+# In the (stereo-)spherical camera, the camera width is not used, since the
+# entire volume is rendered.
+# Set the distance between left-eye and right-eye.
+cam.lens.disparity = ds.domain_width[0] * 1.e-3
+sc.camera = cam
+sc.add_source(vol)
+sc.render()
+sc.save('lens_stereo-spherical.png', sigma_clip=6.0)

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/cookbook/vol-annotated.py
--- a/doc/source/cookbook/vol-annotated.py
+++ b/doc/source/cookbook/vol-annotated.py
@@ -4,7 +4,7 @@
 import pylab
 
 import yt
-import yt.visualization.volume_rendering.api as vr
+import yt.visualization.volume_rendering.old_camera as vr
 
 ds = yt.load("maestro_subCh_plt00248")
 
@@ -17,11 +17,11 @@
 # centered on these with width sigma        
 vals = [-1.e7, -5.e6, -2.5e6, 2.5e6, 5.e6, 1.e7]
 sigma = 2.e5
-        
+
 mi, ma = min(vals), max(vals)
 
 # Instantiate the ColorTransferfunction.
-tf =  vr.ColorTransferFunction((mi, ma))
+tf =  yt.ColorTransferFunction((mi, ma))
 
 for v in vals:
     tf.sample_colormap(v, sigma**2, colormap="coolwarm")
@@ -69,7 +69,7 @@
 
 # tell the camera to use our figure
 cam._render_figure = f
-    
+
 # save annotated -- this adds the transfer function values,
 # and the clear_fig=False ensures it writes onto our existing figure.
 cam.save_annotated("vol_annotated.png", nim, dpi=145, clear_fig=False)

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1026,6 +1026,60 @@
 * Some functions may behave oddly or not work at all.
 * Data must already reside in memory.
 
+Unstructured Grid Data
+----------------------
+
+See :ref:`loading-numpy-array`,
+:func:`~yt.frontends.stream.data_structures.load_unstructured_mesh` for
+more detail.
+
+In addition to the above grid types, you can also load data stored on
+unstructured meshes. This type of mesh is used, for example, in many
+finite element calculations. Currently, hexahedral, tetrahedral, and
+wedge-shaped mesh elements are supported.
+
+To load an unstructured mesh, you need to specify the following. First,
+you need to have a coordinates array, which should be an (L, 3) array
+that stores the (x, y, z) positions of all of the vertices in the mesh.
+Second, you need to specify a connectivity array, which describes how
+those vertices are connected into mesh elements. The connectivity array
+should be (N, M), where N is the number of elements and M is the
+connectivity length, i.e. the number of vertices per element. Finally,
+you must also specify a data dictionary, where the keys should be
+the names of the fields and the values should be numpy arrays that
+contain the field data. These arrays can either supply the cell-averaged
+data for each element, in which case they would be (N, 1), or they
+can have node-centered data, in which case they would also be (N, M).
+
+Here is an example of how to load an in-memory, unstructured mesh dataset:
+
+.. code-block:: python
+
+   import yt
+   import numpy
+   from yt.utilities.exodusII_reader import get_data
+
+   coords, connectivity, data = get_data("MOOSE_sample_data/out.e-s010")
+
+This uses a publicly available `MOOSE <http://mooseframework.org/>`_
+dataset along with the get_data function to parse the coords, connectivity, 
+and data. Then, these can be loaded as an in-memory dataset as follows:
+
+.. code-block:: python
+
+    mesh_id = 0
+    ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+
+Note that load_unstructured_mesh can take either a single mesh or a list of meshes.
+Here, we have selected only the first mesh to load.
+
+.. rubric:: Caveats
+
+* Units will be incorrect unless the data has already been converted to cgs.
+* Integration is not implemented.
+* Some functions may behave oddly or not work at all.
+* Data must already reside in memory.
+
 Generic Particle Data
 ---------------------
 

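As a complement to the exodusII example above, a mesh can also be built by
hand following the documented array shapes and argument order. A minimal
sketch with a single hexahedral element and node-centered data (the field
name "test" is an illustrative assumption):

    import yt
    import numpy as np

    # Eight vertices of a unit cube: the (L, 3) coordinates array.
    coords = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0],
                       [0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 1]],
                      dtype="float64")
    # One element using all eight vertices: the (N, M) connectivity array.
    connectivity = np.array([[0, 1, 2, 3, 4, 5, 6, 7]])
    # Node-centered field data, shape (N, M).
    data = {"test": np.random.random((1, 8))}
    ds = yt.load_unstructured_mesh(data, connectivity, coords)
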
diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/quickstart/6)_Volume_Rendering.ipynb
--- a/doc/source/quickstart/6)_Volume_Rendering.ipynb
+++ b/doc/source/quickstart/6)_Volume_Rendering.ipynb
@@ -56,14 +56,14 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "If we want to apply a clipping, we can specify the `clip_ratio`.  This will clip the upper bounds to this value times the standard deviation of the values in the image array."
+      "If we want to apply a clipping, we can specify the `sigma_clip`.  This will clip the upper bounds to this value times the standard deviation of the values in the image array."
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "cam.show(clip_ratio=4)"
+      "cam.show(sigma_clip=4)"
      ],
      "language": "python",
      "metadata": {},
@@ -83,7 +83,7 @@
       "tf = yt.ColorTransferFunction((-28, -25))\n",
       "tf.add_layers(4, w=0.03)\n",
       "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20.0, 'kpc'), 512, tf, no_ghost=False)\n",
-      "cam.show(clip_ratio=4.0)"
+      "cam.show(sigma_clip=4.0)"
      ],
      "language": "python",
      "metadata": {},
@@ -93,4 +93,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -418,6 +418,7 @@
    ~yt.frontends.stream.data_structures.load_amr_grids
    ~yt.frontends.stream.data_structures.load_particles
    ~yt.frontends.stream.data_structures.load_hexahedral_mesh
+   ~yt.frontends.stream.data_structures.load_unstructured_mesh
 
 Derived Datatypes
 -----------------
@@ -606,36 +607,57 @@
 
 See also :ref:`volume_rendering`.
 
-Here are the primary entry points:
+Here are the primary entry points and the main classes involved in the 
+Scene infrastructure:
 
 .. autosummary::
    :toctree: generated/
 
+   ~yt.visualization.volume_rendering.volume_rendering.volume_render
+   ~yt.visualization.volume_rendering.volume_rendering.create_scene
+   ~yt.visualization.volume_rendering.off_axis_projection.off_axis_projection
+   ~yt.visualization.volume_rendering.scene.Scene
    ~yt.visualization.volume_rendering.camera.Camera
-   ~yt.visualization.volume_rendering.camera.off_axis_projection
-   ~yt.visualization.volume_rendering.camera.allsky_projection
+   ~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree
 
-These objects set up the way the image looks:
+The different kinds of sources:
 
 .. autosummary::
    :toctree: generated/
 
-   ~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction
-   ~yt.visualization.volume_rendering.transfer_functions.MultiVariateTransferFunction
-   ~yt.visualization.volume_rendering.transfer_functions.PlanckTransferFunction
-   ~yt.visualization.volume_rendering.transfer_functions.ProjectionTransferFunction
-   ~yt.visualization.volume_rendering.transfer_functions.TransferFunction
+   ~yt.visualization.volume_rendering.render_source.RenderSource
+   ~yt.visualization.volume_rendering.render_source.VolumeSource
+   ~yt.visualization.volume_rendering.render_source.PointSource
+   ~yt.visualization.volume_rendering.render_source.LineSource
+   ~yt.visualization.volume_rendering.render_source.BoxSource
+   ~yt.visualization.volume_rendering.render_source.GridSource
+   ~yt.visualization.volume_rendering.render_source.CoordinateVectorSource
+   ~yt.visualization.volume_rendering.render_source.MeshSource
 
-There are also advanced objects for particular use cases:
+The different kinds of transfer functions:
 
 .. autosummary::
    :toctree: generated/
 
-   ~yt.visualization.volume_rendering.camera.FisheyeCamera
-   ~yt.visualization.volume_rendering.camera.MosaicCamera
-   ~yt.visualization.volume_rendering.camera.PerspectiveCamera
-   ~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree
-   ~yt.visualization.volume_rendering.camera.StereoPairCamera
+   ~yt.visualization.volume_rendering.transfer_functions.TransferFunction
+   ~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction
+   ~yt.visualization.volume_rendering.transfer_functions.ProjectionTransferFunction
+   ~yt.visualization.volume_rendering.transfer_functions.PlanckTransferFunction
+   ~yt.visualization.volume_rendering.transfer_functions.MultiVariateTransferFunction
+   ~yt.visualization.volume_rendering.transfer_function_helper.TransferFunctionHelper
+ 
+The different kinds of lenses:
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.visualization.volume_rendering.lens.Lens
+   ~yt.visualization.volume_rendering.lens.PlaneParallelLens
+   ~yt.visualization.volume_rendering.lens.PerspectiveLens
+   ~yt.visualization.volume_rendering.lens.StereoPerspectiveLens
+   ~yt.visualization.volume_rendering.lens.FisheyeLens
+   ~yt.visualization.volume_rendering.lens.SphericalLens
+   ~yt.visualization.volume_rendering.lens.StereoSphericalLens
 
 Streamlining
 ^^^^^^^^^^^^

diff -r c90653eed93b9a7661c08727d3ef9f67268c2b36 -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:5a1547973517987ff047f1b2405277a0e98392e8fd5ffe04521cb2dc372d32d3"
+  "signature": "sha256:ed09405c56bab51abd351d107a4354726709d289b965f274106f4451b387f5ba"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -25,6 +25,8 @@
       "import numpy as np\n",
       "from IPython.core.display import Image\n",
       "from yt.visualization.volume_rendering.transfer_function_helper import TransferFunctionHelper\n",
+      "from yt.visualization.volume_rendering.render_source import VolumeSource\n",
+      "from yt.visualization.volume_rendering.camera import Camera\n",
       "\n",
       "def showme(im):\n",
       "    # screen out NaNs\n",
@@ -66,7 +68,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "tfh = yt.TransferFunctionHelper(ds)"
+      "tfh = TransferFunctionHelper(ds)"
      ],
      "language": "python",
      "metadata": {},
@@ -84,7 +86,7 @@
      "collapsed": false,
      "input": [
       "# Build a transfer function that is a multivariate gaussian in temperature\n",
-      "tfh = yt.TransferFunctionHelper(ds)\n",
+      "tfh = TransferFunctionHelper(ds)\n",
       "tfh.set_field('temperature')\n",
       "tfh.set_log(True)\n",
       "tfh.set_bounds()\n",
@@ -124,7 +126,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "tfh = yt.TransferFunctionHelper(ds)\n",
+      "tfh = TransferFunctionHelper(ds)\n",
       "tfh.set_field('temperature')\n",
       "tfh.set_bounds()\n",
       "tfh.set_log(True)\n",
@@ -143,27 +145,20 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Finally, let's take a look at the volume rendering."
+      "Finally, let's take a look at the volume rendering. First use the helper function to create a default rendering, then we override this with the transfer function we just created."
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "L = [-0.1, -1.0, -0.1]\n",
-      "c = ds.domain_center\n",
-      "W = 1.5*ds.domain_width\n",
-      "Npixels = 512 \n",
-      "cam = ds.camera(c, L, W, Npixels, tfh.tf, fields=['temperature'],\n",
-      "                  north_vector=[1.,0.,0.], steady_north=True, \n",
-      "                  sub_samples=5, no_ghost=False)\n",
+      "im, sc = yt.volume_render(ds, ['temperature'])\n",
       "\n",
-      "# Here we substitute the TransferFunction we constructed earlier.\n",
-      "cam.transfer_function = tfh.tf\n",
+      "source = sc.get_source(0)\n",
+      "source.set_transfer_function(tfh.tf)\n",
+      "im2 = sc.render()\n",
       "\n",
-      "\n",
-      "im = cam.snapshot()\n",
-      "showme(im[:,:,:3])"
+      "showme(im2[:,:,:3])"
      ],
      "language": "python",
      "metadata": {},

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/23153d7b2ca3/
Changeset:   23153d7b2ca3
Branch:      yt
User:        ngoldbaum
Date:        2015-11-15 15:39:29+00:00
Summary:     Fixing flake8 errors introduced after merging with experimental
Affected #:  9 files

diff -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f -r 23153d7b2ca3d89275a505135a45aebd573d2fdd yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -17,7 +17,6 @@
 from yt.testing import requires_file
 from yt.utilities.answer_testing.framework import requires_ds, \
     GenericArrayTest, data_dir_load
-import numpy as np
 from numpy.random import RandomState
 import os
 

diff -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f -r 23153d7b2ca3d89275a505135a45aebd573d2fdd yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -267,9 +267,9 @@
         mesh_id = chunk.objs[0].mesh_id
         rv = {}
         for field in fields:
-            ftype, fname = field
+            field_type = field[0]
             nodes_per_element = self.fields[mesh_id][field].shape[1]
-            if fname in self._node_types:
+            if field_type in self._node_types:
                 rv[field] = np.empty((size, nodes_per_element), dtype="float64")
             else:
                 rv[field] = np.empty(size, dtype="float64")

diff -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f -r 23153d7b2ca3d89275a505135a45aebd573d2fdd yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -82,7 +82,6 @@
     from pyparsing import ParseFatalException
 
 def get_window_parameters(axis, center, width, ds):
-    axis_name = ds.coordinates.axis_name[axis]
     width = ds.coordinates.sanitize_width(axis, width, None)
     center, display_center = ds.coordinates.sanitize_center(center, axis)
     xax = ds.coordinates.x_axis[axis]

diff -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f -r 23153d7b2ca3d89275a505135a45aebd573d2fdd yt/visualization/volume_rendering/off_axis_projection.py
--- a/yt/visualization/volume_rendering/off_axis_projection.py
+++ b/yt/visualization/volume_rendering/off_axis_projection.py
@@ -110,7 +110,7 @@
     if method not in ['integrate','sum']:
         raise NotImplementedError("Only 'integrate' or 'sum' methods are valid for off-axis-projections")
 
-    if interpolated == True:
+    if interpolated is True:
         raise NotImplementedError("Only interpolated=False methods are currently implemented for off-axis-projections")
 
 
@@ -163,7 +163,6 @@
     assert (vol.sampler is not None)
 
     mylog.debug("Casting rays")
-    total_cells = 0
     double_check = False
     if double_check:
         for brick in vol.volume.bricks:
@@ -190,7 +189,7 @@
     data_source = ds.region(center, mi, ma)
 
     for i, (grid, mask) in enumerate(data_source.blocks):
-        data = [(grid[field] * mask).astype("float64") for field in fields]
+        data = [(grid[f] * mask).astype("float64") for f in fields]
         pg = PartitionedGrid(
             grid.id, data,
             mask.astype('uint8'),

diff -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f -r 23153d7b2ca3d89275a505135a45aebd573d2fdd yt/visualization/volume_rendering/old_camera.py
--- a/yt/visualization/volume_rendering/old_camera.py
+++ b/yt/visualization/volume_rendering/old_camera.py
@@ -26,7 +26,8 @@
 from .transfer_functions import ProjectionTransferFunction
 
 from yt.utilities.lib.grid_traversal import \
-    arr_fisheye_vectors, \
+    pixelize_healpix, \
+    arr_fisheye_vectors, arr_pix2vec_nest, \
     PartitionedGrid, ProjectionSampler, VolumeRenderSampler, \
     LightSourceRenderSampler, InterpolatedProjectionSampler
 from yt.utilities.lib.misc_utilities import \
@@ -1404,8 +1405,7 @@
         self.light_dir = None
         self.light_rgba = None
         if volume is None:
-            volume = AMRKDTree(self.ds, min_level=min_level,
-                               max_level=max_level, data_source=self.data_source)
+            volume = AMRKDTree(self.ds, data_source=self.data_source)
         self.use_kd = isinstance(volume, AMRKDTree)
         self.volume = volume
 
@@ -1513,64 +1513,6 @@
             plot_allsky_healpix(image[:,0,0], self.nside, fn, label, 
                                 cmin = cmin, cmax = cmax)
 
-class AdaptiveHEALpixCamera(Camera):
-    def __init__(self, center, radius, nside,
-                 transfer_function = None, fields = None,
-                 sub_samples = 5, log_fields = None, volume = None,
-                 ds = None, use_kd=True, no_ghost=False,
-                 rays_per_cell = 0.1, max_nside = 8192):
-        ParallelAnalysisInterface.__init__(self)
-        if ds is not None: self.ds = ds
-        self.center = np.array(center, dtype='float64')
-        self.radius = radius
-        self.use_kd = use_kd
-        if transfer_function is None:
-            transfer_function = ProjectionTransferFunction()
-        self.transfer_function = transfer_function
-        if fields is None: fields = ["density"]
-        self.fields = fields
-        self.sub_samples = sub_samples
-        self.log_fields = log_fields
-        if volume is None:
-            volume = AMRKDTree(self.ds, fields=self.fields, no_ghost=no_ghost,
-                               log_fields=log_fields)
-        self.use_kd = isinstance(volume, AMRKDTree)
-        self.volume = volume
-        self.initial_nside = nside
-        self.rays_per_cell = rays_per_cell
-        self.max_nside = max_nside
-
-    def snapshot(self, fn = None):
-        tfp = TransferFunctionProxy(self.transfer_function)
-        tfp.ns = self.sub_samples
-        self.volume.initialize_source()
-        mylog.info("Adaptively rendering.")
-        pbar = get_pbar("Ray casting",
-                        (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
-        total_cells = 0
-        bricks = [b for b in self.volume.traverse(None, self.center, None)][::-1]
-        left_edges = np.array([b.LeftEdge for b in bricks])
-        right_edges = np.array([b.RightEdge for b in bricks])
-        min_dx = min(((b.RightEdge[0] - b.LeftEdge[0])/b.my_data[0].shape[0]
-                     for b in bricks))
-        # We jitter a bit if we're on a boundary of our initial grid
-        for i in range(3):
-            if bricks[0].LeftEdge[i] == self.center[i]:
-                self.center += 1e-2 * min_dx
-            elif bricks[0].RightEdge[i] == self.center[i]:
-                self.center -= 1e-2 * min_dx
-        ray_source = AdaptiveRaySource(self.center, self.rays_per_cell,
-                                       self.initial_nside, self.radius,
-                                       bricks, left_edges, right_edges, self.max_nside)
-        for i,brick in enumerate(bricks):
-            ray_source.integrate_brick(brick, tfp, i, left_edges, right_edges,
-                                       bricks)
-            total_cells += np.prod(brick.my_data[0].shape)
-            pbar.update(total_cells)
-        pbar.finish()
-        info, values = ray_source.get_rays()
-        return info, values
-
 
 class StereoPairCamera(Camera):
     def __init__(self, original_camera, relative_separation = 0.005):
@@ -1825,6 +1767,28 @@
 
 data_object_registry["mosaic_camera"] = MosaicCamera
 
+def plot_allsky_healpix(image, nside, fn, label = "", rotation = None,
+                        take_log = True, resolution=512, cmin=None, cmax=None):
+    import matplotlib.figure
+    import matplotlib.backends.backend_agg
+    if rotation is None: rotation = np.eye(3).astype("float64")
+
+    img, count = pixelize_healpix(nside, image, resolution, resolution, rotation)
+
+    fig = matplotlib.figure.Figure((10, 5))
+    ax = fig.add_subplot(1,1,1,projection='aitoff')
+    if take_log: func = np.log10
+    else: func = lambda a: a
+    implot = ax.imshow(func(img), extent=(-np.pi,np.pi,-np.pi/2,np.pi/2),
+                       clip_on=False, aspect=0.5, vmin=cmin, vmax=cmax)
+    cb = fig.colorbar(implot, orientation='horizontal')
+    cb.set_label(label)
+    ax.xaxis.set_ticks(())
+    ax.yaxis.set_ticks(())
+    canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
+    canvas.print_figure(fn)
+    return img, count
+
 class ProjectionCamera(Camera):
     def __init__(self, center, normal_vector, width, resolution,
             field, weight=None, volume=None, no_ghost = False, 

diff -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f -r 23153d7b2ca3d89275a505135a45aebd573d2fdd yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -21,7 +21,7 @@
     GridSource, RenderSource
 from .zbuffer_array import ZBuffer
 from yt.extern.six.moves import builtins
-
+from yt.utilities.exceptions import YTNotInsideNotebook
 
 class Scene(object):
 

diff -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f -r 23153d7b2ca3d89275a505135a45aebd573d2fdd yt/visualization/volume_rendering/tests/modify_transfer_function.py
--- a/yt/visualization/volume_rendering/tests/modify_transfer_function.py
+++ b/yt/visualization/volume_rendering/tests/modify_transfer_function.py
@@ -10,7 +10,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 import yt
-import numpy as np
 from yt.testing import \
     fake_random_ds
 

diff -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f -r 23153d7b2ca3d89275a505135a45aebd573d2fdd yt/visualization/volume_rendering/tests/multiple_fields.py
--- a/yt/visualization/volume_rendering/tests/multiple_fields.py
+++ b/yt/visualization/volume_rendering/tests/multiple_fields.py
@@ -10,7 +10,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 import yt
-import numpy as np
 from yt.testing import \
     fake_random_ds
 

diff -r 7d3c3b3e270c6d75e6dd3747a3d66bc26940dd0f -r 23153d7b2ca3d89275a505135a45aebd573d2fdd yt/visualization/volume_rendering/tests/test_lenses.py
--- a/yt/visualization/volume_rendering/tests/test_lenses.py
+++ b/yt/visualization/volume_rendering/tests/test_lenses.py
@@ -79,7 +79,6 @@
         cam.lens.fov = 360.0
         cam.set_width(self.ds.domain_width)
         v, c = self.ds.find_max('density')
-        p = self.ds.domain_center.copy()
         cam.set_position(c-0.0005*self.ds.domain_width)
         vol = VolumeSource(dd, field=self.field)
         tf = vol.transfer_function
@@ -96,7 +95,6 @@
         cam = Camera(dd, lens_type='plane-parallel')
         cam.set_width(self.ds.domain_width*1e-2)
         v, c = self.ds.find_max('density')
-        p = self.ds.domain_center.copy()
         vol = VolumeSource(dd, field=self.field)
         tf = vol.transfer_function
         tf.grey_opacity = True


https://bitbucket.org/yt_analysis/yt/commits/e4fe24736180/
Changeset:   e4fe24736180
Branch:      yt
User:        ngoldbaum
Date:        2015-11-15 15:49:50+00:00
Summary:     Moving VR test scripts into proper test functions

This way these tests are actually run by the testing infra
Affected #:  6 files

diff -r 23153d7b2ca3d89275a505135a45aebd573d2fdd -r e4fe2473618056b2d6bcc8ff152028f8f42af197 yt/visualization/volume_rendering/tests/modify_transfer_function.py
--- a/yt/visualization/volume_rendering/tests/modify_transfer_function.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""
-Run a simple volume rendering
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2014, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-import yt
-from yt.testing import \
-    fake_random_ds
-
-ds = fake_random_ds(32)
-im, sc = yt.volume_render(ds)
-
-volume_source = sc.get_source(0)
-tf = volume_source.transfer_function
-tf.clear()
-tf.grey_opacity=True
-tf.add_layers(3, colormap='RdBu')
-sc.render()
-sc.save("new_tf.png")

diff -r 23153d7b2ca3d89275a505135a45aebd573d2fdd -r e4fe2473618056b2d6bcc8ff152028f8f42af197 yt/visualization/volume_rendering/tests/multiple_fields.py
--- a/yt/visualization/volume_rendering/tests/multiple_fields.py
+++ /dev/null
@@ -1,24 +0,0 @@
-"""
-Run a simple volume rendering
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2014, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-import yt
-from yt.testing import \
-    fake_random_ds
-
-ds = fake_random_ds(32)
-im, sc = yt.volume_render(ds)
-
-volume_source = sc.get_source(0)
-volume_source.set_field(('gas','velocity_x'))
-volume_source.build_default_transfer_function()
-sc.render()
-sc.save("render_x.png")
-

diff -r 23153d7b2ca3d89275a505135a45aebd573d2fdd -r e4fe2473618056b2d6bcc8ff152028f8f42af197 yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
--- a/yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""
-Run a simple volume rendering
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2014, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-import yt
-import numpy as np
-from yt.testing import \
-    fake_random_ds
-
-ds = fake_random_ds(32)
-im, sc = yt.volume_render(ds)
-
-angle = 2*np.pi
-frames = 10
-for i in range(frames):
-    sc.camera.yaw(angle/frames)
-    sc.render()
-    sc.save('test_rot_%04i.png' % i, sigma_clip=6.0)

diff -r 23153d7b2ca3d89275a505135a45aebd573d2fdd -r e4fe2473618056b2d6bcc8ff152028f8f42af197 yt/visualization/volume_rendering/tests/simple_scene_creation.py
--- a/yt/visualization/volume_rendering/tests/simple_scene_creation.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""
-Create a simple scene object
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2014, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-import yt
-from yt.testing import \
-    fake_random_ds
-
-ds = fake_random_ds(32)
-sc = yt.create_scene(ds)

diff -r 23153d7b2ca3d89275a505135a45aebd573d2fdd -r e4fe2473618056b2d6bcc8ff152028f8f42af197 yt/visualization/volume_rendering/tests/simple_volume_rendering.py
--- a/yt/visualization/volume_rendering/tests/simple_volume_rendering.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""
-Run a simple volume rendering
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2014, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-import yt
-from yt.testing import \
-    fake_random_ds
-
-ds = fake_random_ds(32)
-im, sc = yt.volume_render(ds, fname='test.png', sigma_clip=4.0)

diff -r 23153d7b2ca3d89275a505135a45aebd573d2fdd -r e4fe2473618056b2d6bcc8ff152028f8f42af197 yt/visualization/volume_rendering/tests/test_varia.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/tests/test_varia.py
@@ -0,0 +1,55 @@
+"""
+Miscellaneous tests for VR infrastructure
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+import yt
+from yt.testing import \
+    fake_random_ds
+
+def test_simple_scene_creation():
+    ds = fake_random_ds(32)
+    yt.create_scene(ds)
+
+def test_modify_transfer_function():
+    ds = fake_random_ds(32)
+    im, sc = yt.volume_render(ds)
+
+    volume_source = sc.get_source(0)
+    tf = volume_source.transfer_function
+    tf.clear()
+    tf.grey_opacity = True
+    tf.add_layers(3, colormap='RdBu')
+    sc.render()
+
+def test_multiple_fields():
+    ds = fake_random_ds(32)
+    im, sc = yt.volume_render(ds)
+
+    volume_source = sc.get_source(0)
+    volume_source.set_field(('gas', 'velocity_x'))
+    volume_source.build_default_transfer_function()
+    sc.render()
+
+def test_rotation_volume_rendering():
+    ds = fake_random_ds(32)
+    im, sc = yt.volume_render(ds)
+
+    angle = 2*np.pi
+    frames = 10
+    for i in range(frames):
+        sc.camera.yaw(angle/frames)
+        sc.render()
+
+def test_simple_volume_rendering():
+    ds = fake_random_ds(32)
+    im, sc = yt.volume_render(ds, sigma_clip=4.0)

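For context on "actually run by the testing infra": yt's test runner at this
point, nose, only collects top-level functions named test_* from modules named
test_*.py, so the bare scripts deleted above were never executed. A sketch of
the collectible shape, mirroring the functions above:

    import yt
    from yt.testing import fake_random_ds

    # Collected because the module is named test_*.py and the function name
    # starts with "test_"; a bare script body would be ignored by the runner.
    def test_simple_scene_creation():
        ds = fake_random_ds(32)
        yt.create_scene(ds)
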

https://bitbucket.org/yt_analysis/yt/commits/212ccd3e9a2f/
Changeset:   212ccd3e9a2f
Branch:      yt
User:        ngoldbaum
Date:        2015-11-15 16:56:54+00:00
Summary:     Fix two tests
Affected #:  2 files

diff -r e4fe2473618056b2d6bcc8ff152028f8f42af197 -r 212ccd3e9a2f1eeb7340aba5522623cf80383001 yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -14,9 +14,7 @@
     XSpecThermalModel, XSpecAbsorbModel, \
     ThermalPhotonModel, PhotonList
 from yt.config import ytcfg
-from yt.utilities.answer_testing.framework import \
-    requires_module
-from yt.testing import requires_file
+from yt.testing import requires_file, requires_module
 import numpy as np
 from yt.utilities.physical_ratios import \
     K_per_keV, mass_hydrogen_grams

diff -r e4fe2473618056b2d6bcc8ff152028f8f42af197 -r 212ccd3e9a2f1eeb7340aba5522623cf80383001 yt/visualization/volume_rendering/tests/test_varia.py
--- a/yt/visualization/volume_rendering/tests/test_varia.py
+++ b/yt/visualization/volume_rendering/tests/test_varia.py
@@ -36,8 +36,7 @@
     im, sc = yt.volume_render(ds)
 
     volume_source = sc.get_source(0)
-    volume_source.set_field(('gas', 'velocity_x'))
-    volume_source.build_default_transfer_function()
+    volume_source.set_fields([('gas', 'velocity_x'), ('gas', 'density')])
     sc.render()
 
 def test_rotation_volume_rendering():


https://bitbucket.org/yt_analysis/yt/commits/c1233c6a3e9f/
Changeset:   c1233c6a3e9f
Branch:      yt
User:        ngoldbaum
Date:        2015-11-15 19:25:41+00:00
Summary:     Fix bad import in SZ projection code
Affected #:  1 file

diff -r 212ccd3e9a2f1eeb7340aba5522623cf80383001 -r c1233c6a3e9fb5c02049e4211bcdb03cfd446e4d yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -20,7 +20,8 @@
 
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
 from yt.funcs import fix_axis, mylog, get_pbar
-from yt.visualization.volume_rendering.camera import off_axis_projection
+from yt.visualization.volume_rendering.off_axis_projection import \
+    off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     communication_system, parallel_root_only
 from yt import units


https://bitbucket.org/yt_analysis/yt/commits/9dfca5caa760/
Changeset:   9dfca5caa760
Branch:      yt
User:        ngoldbaum
Date:        2015-11-15 22:07:00+00:00
Summary:     Fix transposition of field type and field name
Affected #:  1 file

diff -r c1233c6a3e9fb5c02049e4211bcdb03cfd446e4d -r 9dfca5caa760b7964a4f269592d469ca758071ec yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -267,9 +267,9 @@
         mesh_id = chunk.objs[0].mesh_id
         rv = {}
         for field in fields:
-            field_type = field[0]
+            field_name = field[1]
             nodes_per_element = self.fields[mesh_id][field].shape[1]
-            if field_type in self._node_types:
+            if field_name in self._node_types:
                 rv[field] = np.empty((size, nodes_per_element), dtype="float64")
             else:
                 rv[field] = np.empty(size, dtype="float64")

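The fix above hinges on yt field keys being (field_type, field_name) tuples;
the earlier pass checked the type slot against self._node_types when the
node-type markers actually live in the name slot. A small illustration (the
tuple values are hypothetical):

    # Hypothetical unstructured-mesh field key:
    field = ("connect1", "convected")
    field_type = field[0]  # "connect1" -- the mesh/type part
    field_name = field[1]  # "convected" -- the part compared to _node_types
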

https://bitbucket.org/yt_analysis/yt/commits/cba5e95e12bd/
Changeset:   cba5e95e12bd
Branch:      yt
User:        bwkeller
Date:        2015-11-16 19:15:45+00:00
Summary:     Merged in ngoldbaum/yt (pull request #1848)

Linting yt.data_objects, yt.utilities, and top-level yt.* submodules. Adding a flake8 test
Affected #:  167 files

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be coding_styleguide.txt
--- /dev/null
+++ b/coding_styleguide.txt
@@ -0,0 +1,101 @@
+Style Guide for Coding in yt
+============================
+
+Coding Style Guide
+------------------
+
+ * In general, follow PEP-8 guidelines.
+   http://www.python.org/dev/peps/pep-0008/
+ * Classes are ``ConjoinedCapitals``, methods and functions are
+   ``lowercase_with_underscores``.
+ * Use 4 spaces, not tabs, to represent indentation.
+ * Line widths should not be more than 80 characters.
+ * Do not use nested classes unless you have a very good reason to, such as
+   requiring a namespace or class-definition modification.  Classes should live
+   at the top level.  ``__metaclass__`` is exempt from this.
+ * Do not use unnecessary parentheses in conditionals.  ``if((something) and
+   (something_else))`` should be rewritten as
+   ``if something and something_else``. Python is more forgiving than C.
+ * Avoid copying memory when possible. For example, don't do
+   ``a = a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3``
+   should be ``np.multiply(a, 3, a)``.
+ * In general, avoid all double-underscore method names: ``__something`` is
+   usually unnecessary.
+ * When writing a subclass, use the super built-in to access the super class,
+   rather than explicitly. Ex: ``super(SpecialGridSubclass, self).__init__()``
+   rather than ``SpecialGrid.__init__()``.
+ * Docstrings should describe input, output, behavior, and any state changes
+   that occur on an object.  See the file ``doc/docstring_example.txt`` for a
+   fiducial example of a docstring.
+ * Use only one top-level import per line. Unless there is a good reason not to,
+   imports should happen at the top of the file, after the copyright blurb.
+ * Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use
+   ``is`` or ``is not``.
+ * If you are comparing with a numpy boolean array, just refer to the array.
+   Ex: do ``np.all(array)`` instead of ``np.all(array == True)``.
+ * Never compare with None using ``==`` or ``!=``, use ``is None`` or
+   ``is not None``.
+ * Use ``statement is not True`` instead of ``not statement is True``
+ * Only one statement per line, do not use semicolons to put two or more
+   statements on a single line.
+ * Only declare local variables if they will be used later. If you do not use the
+   return value of a function, do not store it in a variable.
+ * Add tests for new functionality. When fixing a bug, consider adding a test to
+   prevent the bug from recurring.
+
+API Guide
+---------
+
+ * Do not use ``from some_module import *``
+ * Internally, only import from source files directly -- instead of:
+
+     ``from yt.visualization.api import ProjectionPlot``
+
+   do:
+
+     ``from yt.visualization.plot_window import ProjectionPlot``
+
+ * Import symbols from the module where they are defined, avoid transitive
+   imports.
+ * Import standard library modules, functions, and classes from builtins, do not
+   import them from other yt files.
+ * Numpy is to be imported as ``np``.
+ * Do not use too many keyword arguments.  If you have a lot of keyword
+   arguments, then you are doing too much in ``__init__`` and not enough via
+   parameter setting.
+ * In function arguments, place spaces after commas.  ``def something(a,b,c)``
+   should be ``def something(a, b, c)``.
+ * Don't create a new class to replicate the functionality of an old class --
+   replace the old class.  Too many options makes for a confusing user
+   experience.
+ * Parameter files external to yt are a last resort.
+ * The usage of the ``**kwargs`` construction should be avoided.  If they cannot
+   be avoided, they must be explained, even if they are only to be passed on to
+   a nested function.
+
+Variable Names and Enzo-isms
+----------------------------
+Avoid Enzo-isms.  This includes but is not limited to:
+
+ * Hard-coding parameter names that are the same as those in Enzo.  The
+   following translation table should be of some help.  Note that the
+   parameters are now properties on a ``Dataset`` subclass: you access them
+   like ds.refine_by .
+
+    - ``RefineBy`` => ``refine_by``
+    - ``TopGridRank`` => ``dimensionality``
+    - ``TopGridDimensions`` => ``domain_dimensions``
+    - ``InitialTime`` => ``current_time``
+    - ``DomainLeftEdge`` => ``domain_left_edge``
+    - ``DomainRightEdge`` => ``domain_right_edge``
+    - ``CurrentTimeIdentifier`` => ``unique_identifier``
+    - ``CosmologyCurrentRedshift`` => ``current_redshift``
+    - ``ComovingCoordinates`` => ``cosmological_simulation``
+    - ``CosmologyOmegaMatterNow`` => ``omega_matter``
+    - ``CosmologyOmegaLambdaNow`` => ``omega_lambda``
+    - ``CosmologyHubbleConstantNow`` => ``hubble_constant``
+
+ * Do not assume that the domain runs from 0 .. 1.  This is not true
+   everywhere.
+ * Variable names should be short but descriptive.
+ * No globals!

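Two of the comparison rules above, shown side by side (a minimal sketch with
illustrative names):

    import numpy as np

    result = None
    mask = np.random.random(10) > 0.5

    # Compare with None via identity, never equality:
    if result is None:      # not: result == None
        print("no result yet")

    # Refer to a boolean array directly:
    if np.all(mask):        # not: np.all(mask == True)
        print("all selected")
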
diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-Style Guide for Coding in yt
-============================
-
-Coding Style Guide
-------------------
-
- * In general, follow PEP-8 guidelines.
-   http://www.python.org/dev/peps/pep-0008/
- * Classes are ConjoinedCapitals, methods and functions are
-   lowercase_with_underscores.
- * Use 4 spaces, not tabs, to represent indentation.
- * Line widths should not be more than 80 characters.
- * Do not use nested classes unless you have a very good reason to, such as
-   requiring a namespace or class-definition modification.  Classes should live
-   at the top level.  __metaclass__ is exempt from this.
- * Do not use unnecessary parenthesis in conditionals.  if((something) and
-   (something_else)) should be rewritten as if something and something_else.
-   Python is more forgiving than C.
- * Avoid copying memory when possible. For example, don't do 
-   "a = a.reshape(3,4)" when "a.shape = (3,4)" will do, and "a = a * 3" should
-   be "np.multiply(a, 3, a)".
- * In general, avoid all double-underscore method names: __something is usually
-   unnecessary.
- * When writing a subclass, use the super built-in to access the super class,
-   rather than explicitly. Ex: "super(SpecialGrid, self).__init__()" rather than
-   "SpecialGrid.__init__()".
- * Doc strings should describe input, output, behavior, and any state changes
-   that occur on an object.  See the file `doc/docstring_example.txt` for a
-   fiducial example of a docstring.
-
-API Guide
----------
-
- * Do not import "*" from anything other than "yt.funcs".
- * Internally, only import from source files directly -- instead of:
-
-   from yt.visualization.api import ProjectionPlot
-
-   do:
-
-   from yt.visualization.plot_window import ProjectionPlot
-
- * Numpy is to be imported as "np", after a long time of using "na".
- * Do not use too many keyword arguments.  If you have a lot of keyword
-   arguments, then you are doing too much in __init__ and not enough via
-   parameter setting.
- * In function arguments, place spaces before commas.  def something(a,b,c)
-   should be def something(a, b, c).
- * Don't create a new class to replicate the functionality of an old class --
-   replace the old class.  Too many options makes for a confusing user
-   experience.
- * Parameter files external to yt are a last resort.
- * The usage of the **kwargs construction should be avoided.  If they cannot
-   be avoided, they must be explained, even if they are only to be passed on to
-   a nested function.
-
-Variable Names and Enzo-isms
-----------------------------
-
- * Avoid Enzo-isms.  This includes but is not limited to:
-   * Hard-coding parameter names that are the same as those in Enzo.  The
-     following translation table should be of some help.  Note that the
-     parameters are now properties on a Dataset subclass: you access them
-     like ds.refine_by .
-     * RefineBy => refine_by
-     * TopGridRank => dimensionality
-     * TopGridDimensions => domain_dimensions
-     * InitialTime => current_time
-     * DomainLeftEdge => domain_left_edge
-     * DomainRightEdge => domain_right_edge
-     * CurrentTimeIdentifier => unique_identifier
-     * CosmologyCurrentRedshift => current_redshift
-     * ComovingCoordinates => cosmological_simulation
-     * CosmologyOmegaMatterNow => omega_matter
-     * CosmologyOmegaLambdaNow => omega_lambda
-     * CosmologyHubbleConstantNow => hubble_constant
-   * Do not assume that the domain runs from 0 .. 1.  This is not true
-     everywhere.
- * Variable names should be short but descriptive.
- * No globals!

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -59,7 +59,7 @@
   from yt.analysis_modules.halo_finding.api import *
 
   ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
-  halo_list = parallelHF(ds)
+  halo_list = HaloFinder(ds)
   halo_list.dump('MyHaloList')
 
 Ellipsoid Parameters

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -501,11 +501,7 @@
 subtle art in estimating the amount of memory needed for halo finding, but a
 rule of thumb is that the HOP halo finder is the most memory intensive
 (:func:`HaloFinder`), and Friends of Friends (:func:`FOFHaloFinder`) is the
-most memory-conservative.  It has been found that :func:`parallelHF` needs
-roughly 1 MB of memory per 5,000 particles, although recent work has improved
-this and the memory requirement is now smaller than this. But this is a good
-starting point for beginning to calculate the memory required for halo-finding.
-For more information, see :ref:`halo_finding`.
+most memory-conservative. For more information, see :ref:`halo_finding`.
 
 **Volume Rendering**
 

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -494,80 +494,4 @@
 
 .. _code-style-guide:
 
-Code Style Guide
-----------------
-
-To keep things tidy, we try to stick with a couple simple guidelines.
-
-General Guidelines
-++++++++++++++++++
-
-* In general, follow `PEP-8 <http://www.python.org/dev/peps/pep-0008/>`_ guidelines.
-* Classes are ConjoinedCapitals, methods and functions are
-  ``lowercase_with_underscores.``
-* Use 4 spaces, not tabs, to represent indentation.
-* Line widths should not be more than 80 characters.
-* Do not use nested classes unless you have a very good reason to, such as
-  requiring a namespace or class-definition modification.  Classes should live
-  at the top level.  ``__metaclass__`` is exempt from this.
-* Do not use unnecessary parentheses in conditionals.  ``if((something) and
-  (something_else))`` should be rewritten as ``if something and
-  something_else``.  Python is more forgiving than C.
-* Avoid copying memory when possible. For example, don't do ``a =
-  a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3`` should be
-  ``np.multiply(a, 3, a)``.
-* In general, avoid all double-underscore method names: ``__something`` is
-  usually unnecessary.
-* Doc strings should describe input, output, behavior, and any state changes
-  that occur on an object.  See the file `doc/docstring_example.txt` for a
-  fiducial example of a docstring.
-
-API Guide
-+++++++++
-
-* Do not import "*" from anything other than ``yt.funcs``.
-* Internally, only import from source files directly; instead of: ``from
-  yt.visualization.api import SlicePlot`` do
-  ``from yt.visualization.plot_window import SlicePlot``.
-* Numpy is to be imported as ``np``.
-* Do not use too many keyword arguments.  If you have a lot of keyword
-  arguments, then you are doing too much in ``__init__`` and not enough via
-  parameter setting.
-* In function arguments, place spaces after commas.  ``def something(a,b,c)``
-  should be ``def something(a, b, c)``.
-* Don't create a new class to replicate the functionality of an old class --
-  replace the old class.  Too many options makes for a confusing user
-  experience.
-* Parameter files external to yt are a last resort.
-* The usage of the ``**kwargs`` construction should be avoided.  If they
-  cannot be avoided, they must be explained, even if they are only to be
-  passed on to a nested function.
-* Constructor APIs should be kept as *simple* as possible.
-* Variable names should be short but descriptive.
-* No global variables!
-
-Variable Names and Enzo-isms
-++++++++++++++++++++++++++++
-
-* Avoid Enzo-isms.  This includes but is not limited to:
-
-  + Hard-coding parameter names that are the same as those in Enzo.  The
-    following translation table should be of some help.  Note that the
-    parameters are now properties on a Dataset subclass: you access them
-    like ``ds.refine_by``.
-
-    - ``RefineBy`` => ``refine_by``
-    - ``TopGridRank`` => ``dimensionality``
-    - ``TopGridDimensions`` => ``domain_dimensions``
-    - ``InitialTime`` => ``current_time``
-    - ``DomainLeftEdge`` => ``domain_left_edge``
-    - ``DomainRightEdge`` => ``domain_right_edge``
-    - ``CurrentTimeIdentifier`` => ``unique_identifier``
-    - ``CosmologyCurrentRedshift`` => ``current_redshift``
-    - ``ComovingCoordinates`` => ``cosmological_simulation``
-    - ``CosmologyOmegaMatterNow`` => ``omega_matter``
-    - ``CosmologyOmegaLambdaNow`` => ``omega_lambda``
-    - ``CosmologyHubbleConstantNow`` => ``hubble_constant``
-
-  + Do not assume that the domain runs from 0 to 1.  This is not true
-    for many codes and datasets.
+.. include:: ../../../coding_styleguide.txt

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -9,7 +9,11 @@
 with-xunit=1
 
 [flake8]
-# if we include api.py files, we get tons of spurious "imported but unused" errors
-exclude = */api.py,*/__config__.py,yt/visualization/_mpl_imports.py
+# we exclude:
+#      api.py and __init__.py files to avoid spurious unused import errors
+#      _mpl_imports.py for the same reason
+#      autogenerated __config__.py files
+#      vendored libraries
+exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
 max-line-length=999
-ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E302,E303,E401,E502,E701,E703,W291,W293,W391
\ No newline at end of file
+ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E402,E502,E701,E703,E731,W291,W293,W391,W503
\ No newline at end of file

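For context, the "spurious unused import" errors come from re-export modules: an api.py imports names purely so downstream code can import them from one place, and flake8 flags every such import as unused (F401). A minimal, hypothetical sketch of the pattern (the stdlib import here stands in for the yt names a real api.py re-exports):

    # api.py-style re-export module (illustrative only).  The import exists
    # solely so callers can write `from package.api import name`, so flake8
    # reports it as "imported but unused" unless the file is excluded, as in
    # the config above, or each line carries a noqa marker:
    from os.path import join  # noqa

Excluding whole api.py and __init__.py files, as the new config does, avoids sprinkling noqa comments through the codebase.
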
diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be tests/README
--- /dev/null
+++ b/tests/README
@@ -0,0 +1,3 @@
+This directory contains two tiny Enzo cosmological datasets.
+
+They were added a long time ago and are provided for testing purposes.
\ No newline at end of file

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be tests/boolean_regions.py
--- a/tests/boolean_regions.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.boolean_region_tests import \
-    TestBooleanANDGridQuantity, TestBooleanORGridQuantity, \
-    TestBooleanNOTGridQuantity, TestBooleanANDParticleQuantity, \
-    TestBooleanORParticleQuantity, TestBooleanNOTParticleQuantity
-
-create_test(TestBooleanANDGridQuantity, "BooleanANDGrid")
-
-create_test(TestBooleanORGridQuantity, "BooleanORGrid")
-
-create_test(TestBooleanNOTGridQuantity, "BooleanNOTGrid")
-
-create_test(TestBooleanANDParticleQuantity, "BooleanANDParticle")
-
-create_test(TestBooleanORParticleQuantity, "BooleanORParticle")
-
-create_test(TestBooleanNOTParticleQuantity, "BooleanNOTParticle")

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be tests/fields_to_test.py
--- a/tests/fields_to_test.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# We want to test several things.  We need to be able to run the
-
-field_list = ["Density", "Temperature", "x-velocity", "y-velocity",
-    "z-velocity",
-    # Now some derived fields
-    "Pressure", "SoundSpeed", "particle_density", "Entropy",
-    # Ghost zones
-    "AveragedDensity", "DivV"]
-
-particle_field_list = ["particle_position_x", "ParticleMassMsun"]

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be tests/halos.py
--- a/tests/halos.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.halo_tests import \
-    TestHaloCountHOP, TestHaloCountFOF, TestHaloCountPHOP
-
-create_test(TestHaloCountHOP, "halo_count_HOP", threshold=80.0)
-
-create_test(TestHaloCountFOF, "halo_count_FOF", link=0.2, padding=0.02)
-
-create_test(TestHaloCountPHOP, "halo_count_PHOP", threshold=80.0)

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be tests/hierarchy_consistency.py
--- a/tests/hierarchy_consistency.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTDatasetTest, RegressionTestException
-from yt.funcs import ensure_list
-
-
-class HierarchyInconsistent(RegressionTestException):
-    pass
-
-
-class HierarchyConsistency(YTDatasetTest):
-    name = "index_consistency"
-
-    def run(self):
-        self.result = \
-            all(g in ensure_list(c.Parent) for g in self.ds.index.grids
-                                            for c in g.Children)
-
-    def compare(self, old_result):
-        if not(old_result and self.result): raise HierarchyInconsistent()
-
-
-class GridLocationsProperties(YTDatasetTest):
-    name = "level_consistency"
-
-    def run(self):
-        self.result = dict(grid_left_edge=self.ds.grid_left_edge,
-                           grid_right_edge=self.ds.grid_right_edge,
-                           grid_levels=self.ds.grid_levels,
-                           grid_particle_count=self.ds.grid_particle_count,
-                           grid_dimensions=self.ds.grid_dimensions)
-
-    def compare(self, old_result):
-        # We allow no difference between these values
-        self.compare_data_arrays(self.result, old_result, 0.0)
-
-
-class GridRelationshipsChanged(RegressionTestException):
-    pass
-
-
-class GridRelationships(YTDatasetTest):
-
-    name = "grid_relationships"
-
-    def run(self):
-        self.result = [[p.id for p in ensure_list(g.Parent) \
-            if g.Parent is not None]
-            for g in self.ds.index.grids]
-
-    def compare(self, old_result):
-        if len(old_result) != len(self.result):
-            raise GridRelationshipsChanged()
-        for plist1, plist2 in zip(old_result, self.result):
-            if len(plist1) != len(plist2): raise GridRelationshipsChanged()
-            if not all((p1 == p2 for p1, p2 in zip(plist1, plist2))):
-                raise GridRelationshipsChanged()
-
-
-class GridGlobalIndices(YTDatasetTest):
-    name = "global_startindex"
-
-    def run(self):
-        self.result = na.array([g.get_global_startindex()
-                                for g in self.ds.index.grids])
-
-    def compare(self, old_result):
-        self.compare_array_delta(old_result, self.result, 0.0)

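The HierarchyConsistency test above encodes a simple invariant: every grid must appear in the Parent list of each of its children. A toy illustration with stand-in objects (the Grid class here is invented for the example; real yt grids carry the same Parent/Children attributes):

    class Grid:
        def __init__(self):
            self.Parent = []    # list of parent grids
            self.Children = []  # list of child grids

    root, child = Grid(), Grid()
    root.Children.append(child)
    child.Parent.append(root)

    grids = [root, child]
    # the same all(...) expression the test evaluates over ds.index.grids
    print(all(g in c.Parent for g in grids for c in g.Children))  # True
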
diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be tests/object_field_values.py
--- a/tests/object_field_values.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import hashlib
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTDatasetTest, RegressionTestException, create_test
-from yt.funcs import ensure_list, iterable
-from fields_to_test import field_list, particle_field_list
-
-
-class FieldHashesDontMatch(RegressionTestException):
-    pass
-
-known_objects = {}
-
-
-def register_object(func):
-    known_objects[func.func_name] = func
-    return func
-
-
- at register_object
-def centered_sphere(tobj):
-    center = 0.5 * (tobj.ds.domain_right_edge + tobj.ds.domain_left_edge)
-    width = (tobj.ds.domain_right_edge - tobj.ds.domain_left_edge).max()
-    tobj.data_object = tobj.ds.sphere(center, width / 0.25)
-
-
- at register_object
-def off_centered_sphere(tobj):
-    center = 0.5 * (tobj.ds.domain_right_edge + tobj.ds.domain_left_edge)
-    width = (tobj.ds.domain_right_edge - tobj.ds.domain_left_edge).max()
-    tobj.data_object = tobj.ds.sphere(center - 0.25 * width, width / 0.25)
-
-
- at register_object
-def corner_sphere(tobj):
-    width = (tobj.ds.domain_right_edge - tobj.ds.domain_left_edge).max()
-    tobj.data_object = tobj.ds.sphere(tobj.ds.domain_left_edge, width / 0.25)
-
-
- at register_object
-def disk(self):
-    center = (self.ds.domain_right_edge + self.ds.domain_left_edge) / 2.
-    radius = (self.ds.domain_right_edge - self.ds.domain_left_edge).max() / 10.
-    height = (self.ds.domain_right_edge - self.ds.domain_left_edge).max() / 10.
-    normal = na.array([1.] * 3)
-    self.data_object = self.ds.disk(center, normal, radius, height)
-
-
- at register_object
-def all_data(self):
-    self.data_object = self.ds.all_data()
-
-_new_known_objects = {}
-for field in ["Density"]:  # field_list:
-    for object_name in known_objects:
-
-        def _rfunc(oname, fname):
-
-            def func(tobj):
-                known_objects[oname](tobj)
-                tobj.orig_data_object = tobj.data_object
-                avg_value = tobj.orig_data_object.quantities[
-                        "WeightedAverageQuantity"](fname, "Density")
-                tobj.data_object = tobj.orig_data_object.cut_region(
-                        ["grid['%s'] > %s" % (fname, avg_value)])
-            return func
-        _new_known_objects["%s_cut_region_%s" % (object_name, field)] = \
-                _rfunc(object_name, field)
-known_objects.update(_new_known_objects)
-
-
-class YTFieldValuesTest(YTDatasetTest):
-
-    def run(self):
-        vals = self.data_object[self.field].copy()
-        vals.sort()
-        self.result = hashlib.sha256(vals.tostring()).hexdigest()
-
-    def compare(self, old_result):
-        if self.result != old_result: raise FieldHashesDontMatch
-
-    def setup(self):
-        YTDatasetTest.setup(self)
-        known_objects[self.object_name](self)
-
-
-class YTExtractIsocontoursTest(YTFieldValuesTest):
-
-    def run(self):
-        val = self.data_object.quantities["WeightedAverageQuantity"](
-            "Density", "Density")
-        rset = self.data_object.extract_isocontours("Density",
-            val, rescale=False, sample_values="Temperature")
-        self.result = rset
-
-    def compare(self, old_result):
-        if self.result[0].size == 0 and old_result[0].size == 0:
-            return True
-        self.compare_array_delta(self.result[0].ravel(),
-                                 old_result[0].ravel(), 1e-7)
-        self.compare_array_delta(self.result[1], old_result[1], 1e-7)
-
-
-class YTIsocontourFluxTest(YTFieldValuesTest):
-
-    def run(self):
-        val = self.data_object.quantities["WeightedAverageQuantity"](
-            "Density", "Density")
-        flux = self.data_object.calculate_isocontour_flux(
-           "Density", val, "x-velocity", "y-velocity", "z-velocity")
-        self.result = flux
-
-    def compare(self, old_result):
-        self.compare_value_delta(self.result, old_result, 1e-7)
-
-for object_name in known_objects:
-    for field in field_list + particle_field_list:
-        if "cut_region" in object_name and field in particle_field_list:
-            continue
-        create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
-                    field=field, object_name=object_name)
-    create_test(YTExtractIsocontoursTest, "%s" % (object_name),
-                object_name=object_name)
-    create_test(YTIsocontourFluxTest, "%s" % (object_name),
-                object_name=object_name)
-
-
-class YTDerivedQuantityTest(YTDatasetTest):
-
-    def setup(self):
-        YTDatasetTest.setup(self)
-        known_objects[self.object_name](self)
-
-    def compare(self, old_result):
-        if hasattr(self.result, 'tostring'):
-            self.compare_array_delta(self.result, old_result, 1e-7)
-            return
-        elif iterable(self.result):
-            a1 = na.array(self.result)
-            a2 = na.array(old_result)
-            self.compare_array_delta(a1, a2, 1e-7)
-        else:
-            if self.result != old_result: raise FieldHashesDontMatch
-
-    def run(self):
-        # This only works if it takes no arguments
-        self.result = self.data_object.quantities[self.dq_name]()
-
-dq_names = ["TotalMass", "AngularMomentumVector", "CenterOfMass",
-            "BulkVelocity", "BaryonSpinParameter", "ParticleSpinParameter"]
-
-# Extrema, WeightedAverageQuantity, TotalQuantity, MaxLocation,
-# MinLocation
-
-for object_name in known_objects:
-    for dq in dq_names:
-        # Some special exceptions
-        if "cut_region" in object_name and (
-            "SpinParameter" in dq or
-            "TotalMass" in dq):
-            continue
-        create_test(YTDerivedQuantityTest, "%s_%s" % (object_name, dq),
-                    dq_name=dq, object_name=object_name)
-
-
-class YTDerivedQuantityTestField(YTDerivedQuantityTest):
-
-    def run(self):
-        self.result = self.data_object.quantities[self.dq_name](
-            self.field_name)
-
-for object_name in known_objects:
-    for field in field_list:
-        for dq in ["Extrema", "TotalQuantity", "MaxLocation", "MinLocation"]:
-            create_test(YTDerivedQuantityTestField,
-                        "%s_%s" % (object_name, field),
-                        field_name=field, dq_name=dq,
-                        object_name=object_name)
-
-
-class YTDerivedQuantityTest_WeightedAverageQuantity(YTDerivedQuantityTest):
-
-    def run(self):
-        self.result = self.data_object.quantities["WeightedAverageQuantity"](
-            self.field_name, weight="CellMassMsun")
-
-for object_name in known_objects:
-    for field in field_list:
-        create_test(YTDerivedQuantityTest_WeightedAverageQuantity,
-                    "%s_%s" % (object_name, field),
-                    field_name=field,
-                    object_name=object_name)

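The YTFieldValuesTest above pins down field values by hashing them. A minimal standalone sketch of that scheme (numpy and hashlib only; tobytes() is the modern spelling of the tostring() call used above): sorting first makes the digest independent of traversal order, so two runs compare equal exactly when they produce the same multiset of values.

    import hashlib
    import numpy as np

    vals = np.array([3.0, 1.0, 2.0])
    vals.sort()  # order-independent fingerprint
    digest = hashlib.sha256(vals.tobytes()).hexdigest()
    print(digest[:16])
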
diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be tests/projections.py
--- a/tests/projections.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestOffAxisProjection, TestSlice, \
-    TestRay, TestGasDistribution, Test2DGasDistribution
-
-from fields_to_test import field_list
-
-for field in field_list:
-    create_test(TestRay, "%s" % field, field=field)
-
-for axis in range(3):
-    for field in field_list:
-        create_test(TestSlice, "%s_%s" % (axis, field),
-                    field=field, axis=axis)
-
-for axis in range(3):
-    for field in field_list:
-        create_test(TestProjection, "%s_%s" % (axis, field),
-                    field=field, axis=axis)
-        create_test(TestProjection, "%s_%s_Density" % (axis, field),
-                    field=field, axis=axis, weight_field="Density")
-
-for field in field_list:
-    create_test(TestOffAxisProjection, "%s_%s" % (axis, field),
-                field=field, axis=axis)
-    create_test(TestOffAxisProjection, "%s_%s_Density" % (axis, field),
-                field=field, axis=axis, weight_field="Density")
-
-for field in field_list:
-    if field != "Density":
-        create_test(TestGasDistribution, "density_%s" % field,
-                    field_x="Density", field_y=field)
-    if field not in ("x-velocity", "Density"):
-        create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
-                    field_x="Density", field_y="x-velocity", field_z=field,
-                    weight="CellMassMsun")

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be tests/runall.py
--- a/tests/runall.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import matplotlib
-matplotlib.use('Agg')
-from yt.config import ytcfg
-ytcfg["yt", "loglevel"] = "50"
-ytcfg["yt", "serialize"] = "False"
-
-from yt.utilities.answer_testing.api import \
-    RegressionTestRunner, clear_registry, create_test, \
-    TestFieldStatistics, TestAllProjections, registry_entries, \
-    Xunit
-from yt.utilities.command_line import get_yt_version
-
-from yt.mods import *
-import fnmatch
-import imp
-import optparse
-import itertools
-import time
-
-#
-# We assume all tests are to be run, unless explicitly given the name of a
-# single test or something that can be run through fnmatch.
-#
-# Keep in mind that we use a different nomenclature here than is used in the
-# Enzo testing system.  Our 'tests' are actually tests that are small and that
-# run relatively quickly on a single dataset; in Enzo's system, a 'test'
-# encompasses both the creation and the examination of data.  Here we assume
-# the data is kept constant.
-#
-
-cwd = os.path.dirname(globals().get("__file__", os.getcwd()))
-
-
-def load_tests(iname, idir):
-    f, filename, desc = imp.find_module(iname, [idir])
-    tmod = imp.load_module(iname, f, filename, desc)
-    return tmod
-
-
-def find_and_initialize_tests():
-    mapping = {}
-    for f in glob.glob(os.path.join(cwd, "*.py")):
-        clear_registry()
-        iname = os.path.basename(f[:-3])
-        try:
-            load_tests(iname, cwd)
-            mapping[iname] = registry_entries()
-            #print "Associating %s with" % (iname)
-            #print "\n    ".join(registry_entries())
-        except ImportError:
-            pass
-    return mapping
-
-if __name__ == "__main__":
-    clear_registry()
-    mapping = find_and_initialize_tests()
-    test_storage_directory = ytcfg.get("yt", "test_storage_dir")
-    try:
-        my_hash = get_yt_version()
-    except:
-        my_hash = "UNKNOWN%s" % (time.time())
-    parser = optparse.OptionParser()
-    parser.add_option("-f", "--parameter-file", dest="parameter_file",
-        default=os.path.join(cwd, "DD0010/moving7_0010"),
-        help="The parameter file value to feed to 'load' to test against")
-    parser.add_option("-l", "--list", dest="list_tests", action="store_true",
-        default=False, help="List all tests and then exit")
-    parser.add_option("-t", "--tests", dest="test_pattern", default="*",
-        help="The test name pattern to match.  Can include wildcards.")
-    parser.add_option("-o", "--output", dest="storage_dir",
-        default=test_storage_directory,
-        help="Base directory for storing test output.")
-    parser.add_option("-c", "--compare", dest="compare_name",
-        default=None,
-        help="The name against which we will compare")
-    parser.add_option("-n", "--name", dest="this_name",
-        default=my_hash,
-        help="The name we'll call this set of tests")
-    opts, args = parser.parse_args()
-
-    if opts.list_tests:
-        tests_to_run = []
-        for m, vals in mapping.items():
-            new_tests = fnmatch.filter(vals, opts.test_pattern)
-            if len(new_tests) == 0: continue
-            load_tests(m, cwd)
-            keys = set(registry_entries())
-            tests_to_run += [t for t in new_tests if t in keys]
-        tests = list(set(tests_to_run))
-        print ("\n    ".join(tests))
-        sys.exit(0)
-
-    # Load the test ds and make sure it's good.
-    ds = load(opts.parameter_file)
-    if ds is None:
-        print "Couldn't load the specified parameter file."
-        sys.exit(1)
-
-    # Now we modify our compare name and self name to include the ds.
-    compare_id = opts.compare_name
-    watcher = None
-    if compare_id is not None:
-        compare_id += "_%s_%s" % (ds, ds._hash())
-        watcher = Xunit()
-    this_id = opts.this_name + "_%s_%s" % (ds, ds._hash())
-
-    rtr = RegressionTestRunner(this_id, compare_id,
-                               results_path=opts.storage_dir,
-                               compare_results_path=opts.storage_dir,
-                               io_log=[opts.parameter_file])
-
-    rtr.watcher = watcher
-    tests_to_run = []
-    for m, vals in mapping.items():
-        new_tests = fnmatch.filter(vals, opts.test_pattern)
-
-        if len(new_tests) == 0: continue
-        load_tests(m, cwd)
-        keys = set(registry_entries())
-        tests_to_run += [t for t in new_tests if t in keys]
-    for test_name in sorted(tests_to_run):
-        print "RUNNING TEST", test_name
-        rtr.run_test(test_name)
-    if watcher is not None:
-        rtr.watcher.report()
-    failures = 0
-    passes = 0
-    for test_name, result in sorted(rtr.passed_tests.items()):
-        if not result:
-            print "TEST %s: %s" % (test_name, result)
-            print "    %s" % rtr.test_messages[test_name]
-        if result: passes += 1
-        else: failures += 1
-    print "Number of passes  : %s" % passes
-    print "Number of failures: %s" % failures

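Test selection in runall.py is plain wildcard matching: the registry entries collected from each test module are filtered against the -t pattern with fnmatch. A two-line illustration (entry names taken from the test modules above):

    import fnmatch

    registry = ["BooleanANDGrid", "halo_count_HOP", "halo_count_FOF"]
    print(fnmatch.filter(registry, "halo_*"))
    # ['halo_count_HOP', 'halo_count_FOF']
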
diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be tests/volume_rendering.py
--- a/tests/volume_rendering.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from yt.mods import *
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTDatasetTest, RegressionTestException
-from yt.funcs import ensure_list
-
-
-class VolumeRenderingInconsistent(RegressionTestException):
-    pass
-
-
-class VolumeRenderingConsistency(YTDatasetTest):
-    name = "volume_rendering_consistency"
-
-    def run(self):
-        c = (self.ds.domain_right_edge + self.ds.domain_left_edge) / 2.
-        W = na.sqrt(3.) * (self.ds.domain_right_edge - \
-            self.ds.domain_left_edge)
-        N = 512
-        n_contours = 5
-        cmap = 'algae'
-        field = 'Density'
-        mi, ma = self.ds.all_data().quantities['Extrema'](field)[0]
-        mi, ma = na.log10(mi), na.log10(ma)
-        contour_width = (ma - mi) / 100.
-        L = na.array([1.] * 3)
-        tf = ColorTransferFunction((mi - 2, ma + 2))
-        tf.add_layers(n_contours, w=contour_width,
-                      col_bounds=(mi * 1.001, ma * 0.999),
-                      colormap=cmap, alpha=na.logspace(-1, 0, n_contours))
-        cam = self.ds.camera(c, L, W, (N, N), transfer_function=tf,
-            no_ghost=True)
-        image = cam.snapshot()
-        # image = cam.snapshot('test_rendering_%s.png'%field)
-        self.result = image
-
-    def compare(self, old_result):
-        # Compare the deltas; give a leeway of 1e-9
-        delta = na.nanmax(na.abs(self.result - old_result) /
-                                 (self.result + old_result))
-        if delta > 1e-9: raise VolumeRenderingInconsistent()

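The comparison above is a normalized infinity-norm: the largest elementwise |new - old| / (new + old) over the rendered image, with nanmax ignoring the NaNs produced by 0/0 pixels. A standalone sketch of the same check:

    import numpy as np

    def max_relative_delta(new, old):
        # largest elementwise relative difference, ignoring NaNs from 0/0
        return np.nanmax(np.abs(new - old) / (new + old))

    old = np.array([1.0, 2.0, 4.0])
    new = np.array([1.0, 2.0 + 1e-10, 4.0])
    assert max_relative_delta(new, old) < 1e-9  # renderings agree
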
diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -121,7 +121,6 @@
     derived_field
 
 from yt.data_objects.api import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     DatasetSeries, ImageArray, \
     particle_filter, add_particle_filter, \
     create_profile, Profile1D, Profile2D, Profile3D, \

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -281,8 +281,6 @@
         errSq=sum(dif**2)
 
         if any(linesP[:,1]==speciesDict['init_b']):
-         #   linesP = prevLinesP
-
             flag = True
             break
             

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -30,10 +30,10 @@
     get_rotation_matrix, \
     periodic_dist
 from yt.utilities.physical_constants import \
-    mass_sun_cgs, \
+    mass_sun_cgs
+from yt.utilities.physical_ratios import \
+    rho_crit_g_cm3_h2, \
     TINY
-from yt.utilities.physical_ratios import \
-    rho_crit_g_cm3_h2
 
 from .hop.EnzoHop import RunHOP
 from .fof.EnzoFOF import RunFOF

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -14,9 +14,7 @@
     XSpecThermalModel, XSpecAbsorbModel, \
     ThermalPhotonModel, PhotonList
 from yt.config import ytcfg
-from yt.utilities.answer_testing.framework import \
-    requires_module
-from yt.testing import requires_file
+from yt.testing import requires_file, requires_module
 import numpy as np
 from yt.utilities.physical_ratios import \
     K_per_keV, mass_hydrogen_grams

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -17,7 +17,6 @@
 from yt.testing import requires_file
 from yt.utilities.answer_testing.framework import requires_ds, \
     GenericArrayTest, data_dir_load
-import numpy as np
 from numpy.random import RandomState
 import os
 

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/analysis_modules/photon_simulator/tests/test_spectra.py
--- a/yt/analysis_modules/photon_simulator/tests/test_spectra.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_spectra.py
@@ -1,9 +1,8 @@
 from yt.analysis_modules.photon_simulator.api import \
     TableApecModel, XSpecThermalModel
-import numpy as np
 from yt.testing import requires_module, fake_random_ds
 from yt.utilities.answer_testing.framework import \
-    GenericArrayTest, data_dir_load
+    GenericArrayTest
 from yt.config import ytcfg
 
 def setup():

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -19,11 +19,11 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
-from yt.units.yt_array import YTQuantity
-from yt.funcs import fix_axis, mylog, iterable, get_pbar
-from yt.visualization.volume_rendering.api import off_axis_projection
+from yt.funcs import fix_axis, mylog, get_pbar
+from yt.visualization.volume_rendering.off_axis_projection import \
+    off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     communication_system, parallel_root_only
+    communication_system, parallel_root_only
 from yt import units
 from yt.utilities.on_demand_imports import _astropy
 

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -12,8 +12,17 @@
 
 from yt.frontends.stream.api import load_uniform_grid
 from yt.funcs import get_pbar
-from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, \
-    mh, cm_per_km, kboltz, Tcmb, hcgs, clight, sigma_thompson
+from yt.utilities.physical_ratios import \
+    cm_per_kpc, \
+    K_per_keV, \
+    cm_per_km
+from yt.utilities.physical_constants import \
+    mh, \
+    kboltz, \
+    Tcmb, \
+    hcgs, \
+    clight, \
+    sigma_thompson
 from yt.testing import requires_module, assert_almost_equal
 from yt.utilities.answer_testing.framework import requires_ds, \
     GenericArrayTest, data_dir_load, GenericImageTest

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -26,7 +26,9 @@
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 
-import math, inspect, time
+import math
+import inspect
+import time
 from collections import defaultdict
 
 sep = 12

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -16,7 +16,6 @@
 #-----------------------------------------------------------------------------
 
 import os
-import types
 from yt.extern.six.moves import configparser
 
 ytcfg_defaults = dict(

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -13,16 +13,19 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import os, os.path, types
+import os
 
 # Named imports
 from yt.extern.six import string_types
-from yt.funcs import *
 from yt.config import ytcfg
+from yt.funcs import mylog
 from yt.utilities.parameter_file_storage import \
     output_type_registry, \
     simulation_time_series_registry, \
     EnzoRunDatabase
+from yt.utilities.exceptions import \
+    YTOutputNotIdentified, \
+    YTSimulationNotIdentified
 from yt.utilities.hierarchy_inspection import find_lowest_subclasses
 
 def load(*args ,**kwargs):

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/data_objects/analyzer_objects.py
--- a/yt/data_objects/analyzer_objects.py
+++ b/yt/data_objects/analyzer_objects.py
@@ -15,7 +15,6 @@
 
 import inspect
 
-from yt.funcs import *
 from yt.extern.six import add_metaclass
 
 analysis_task_registry = {}
@@ -23,7 +22,7 @@
 class RegisteredTask(type):
     def __init__(cls, name, b, d):
         type.__init__(cls, name, b, d)
-        if hasattr(cls, "skip") and cls.skip == False:
+        if hasattr(cls, "skip") and cls.skip is False:
             return
         analysis_task_registry[cls.__name__] = cls
 

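The change from ``== False`` to ``is False`` above is more than style: ``==`` goes through __eq__, so values like 0 compare equal to False, whereas ``is`` matches only the False singleton. A quick demonstration:

    flag = 0
    print(flag == False)  # True  (equality)
    print(flag is False)  # False (identity)
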
diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -27,11 +27,6 @@
     particle_handler_registry
 
 from .profiles import \
-    YTEmptyProfileData, \
-    BinnedProfile, \
-    BinnedProfile1D, \
-    BinnedProfile2D, \
-    BinnedProfile3D, \
     create_profile, \
     Profile1D, \
     Profile2D, \

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -15,21 +15,29 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-import math
-import weakref
-import itertools
-import shelve
 from functools import wraps
 import fileinput
 from re import finditer
+from tempfile import TemporaryFile
 import os
+import zipfile
 
 from yt.config import ytcfg
-from yt.funcs import *
-from yt.utilities.logger import ytLogger
-from .data_containers import \
-    YTSelectionContainer1D, YTSelectionContainer2D, YTSelectionContainer3D, \
-    restore_field_information_state, YTFieldData
+from yt.data_objects.data_containers import \
+    YTSelectionContainer1D, \
+    YTSelectionContainer2D, \
+    YTSelectionContainer3D, \
+    YTFieldData
+from yt.funcs import \
+    ensure_list, \
+    mylog, \
+    get_memory_usage, \
+    iterable, \
+    only_on_root
+from yt.utilities.exceptions import \
+    YTParticleDepositionNotImplemented, \
+    YTNoAPIKey, \
+    YTTooManyVertices
 from yt.utilities.lib.QuadTree import \
     QuadTree
 from yt.utilities.lib.Interpolators import \
@@ -38,8 +46,6 @@
     fill_region
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
-from yt.utilities.data_point_utilities import CombineGrids,\
-    DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
 from yt.utilities.minimal_representation import \
     MinimalProjectionData
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -47,16 +53,10 @@
 from yt.units.unit_object import Unit
 import yt.geometry.particle_deposit as particle_deposit
 from yt.utilities.grid_data_format.writer import write_to_gdf
+from yt.fields.field_exceptions import \
+    NeedsOriginalGrid
 from yt.frontends.stream.api import load_uniform_grid
 
-from yt.fields.field_exceptions import \
-    NeedsGridType,\
-    NeedsOriginalGrid,\
-    NeedsDataField,\
-    NeedsProperty,\
-    NeedsParameter
-from yt.fields.derived_field import \
-    TranslationFunc
 
 class YTStreamline(YTSelectionContainer1D):
     """
@@ -369,14 +369,13 @@
         data['pdy'] = self.ds.arr(pdy, code_length)
         data['fields'] = nvals
         # Now we run the finalizer, which is ignored if we don't need it
-        fd = data['fields']
         field_data = np.hsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
-            finfo = self.ds._get_field_info(*field)
             mylog.debug("Setting field %s", field)
             input_units = self._projected_units[field]
             self[field] = self.ds.arr(field_data[fi].ravel(), input_units)
-        for i in list(data.keys()): self[i] = data.pop(i)
+        for i in list(data.keys()):
+            self[i] = data.pop(i)
         mylog.info("Projection completed")
         self.tree = tree
 
@@ -939,7 +938,6 @@
         ls.current_level += 1
         ls.current_dx = ls.base_dx / \
             self.ds.relative_refinement(0, ls.current_level)
-        LL = ls.left_edge - ls.domain_left_edge
         ls.old_global_startindex = ls.global_startindex
         ls.global_startindex, end_index, ls.current_dims = \
             self._minimal_box(ls.current_dx)
@@ -1509,11 +1507,8 @@
                     color_log = True, emit_log = True, plot_index = None,
                     color_field_max = None, color_field_min = None,
                     emit_field_max = None, emit_field_min = None):
-        import io
-        from sys import version
         if plot_index is None:
             plot_index = 0
-            vmax=0
         ftype = [("cind", "uint8"), ("emit", "float")]
         vtype = [("x","float"),("y","float"), ("z","float")]
         #(0) formulate vertices
@@ -1552,7 +1547,7 @@
                 tmp = self.vertices[i,:]
                 np.divide(tmp, dist_fac, tmp)
                 v[ax][:] = tmp
-        return  v, lut, transparency, emiss, f['cind']
+        return v, lut, transparency, emiss, f['cind']
 
 
     def export_ply(self, filename, bounds = None, color_field = None,
@@ -1734,8 +1729,6 @@
         api_key = api_key or ytcfg.get("yt","sketchfab_api_key")
         if api_key in (None, "None"):
             raise YTNoAPIKey("SketchFab.com", "sketchfab_api_key")
-        import zipfile, json
-        from tempfile import TemporaryFile
 
         ply_file = TemporaryFile()
         self.export_ply(ply_file, bounds, color_field, color_map, color_log,

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -13,32 +13,39 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
 import itertools
-import os
-import types
 import uuid
-from yt.extern.six import string_types
-
-data_object_registry = {}
 
 import numpy as np
 import weakref
 import shelve
+
+from collections import defaultdict
 from contextlib import contextmanager
 
-from yt.funcs import get_output_filename
-from yt.funcs import *
-
 from yt.data_objects.particle_io import particle_handler_registry
 from yt.frontends.ytdata.utilities import \
     save_as_dataset
+from yt.funcs import \
+    get_output_filename, \
+    mylog, \
+    ensure_list, \
+    fix_axis, \
+    iterable
 from yt.units.unit_object import UnitParseError
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
 from yt.utilities.exceptions import \
     YTUnitConversionError, \
     YTFieldUnitError, \
     YTFieldUnitParseError, \
-    YTSpatialFieldUnitError
+    YTSpatialFieldUnitError, \
+    YTCouldNotGenerateField, \
+    YTFieldNotParseable, \
+    YTFieldNotFound, \
+    YTFieldTypeNotFound, \
+    YTDataSelectorNotImplemented
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -55,9 +62,10 @@
     compose_selector
 from yt.extern.six import add_metaclass, string_types
 
+data_object_registry = {}
+
 def force_array(item, shape):
     try:
-        sh = item.shape
         return item.copy()
     except AttributeError:
         if item:
@@ -189,7 +197,7 @@
         elif isinstance(center, string_types):
             if center.lower() in ("c", "center"):
                 self.center = self.ds.domain_center
-             # is this dangerous for race conditions?
+            # is this dangerous for race conditions?
             elif center.lower() in ("max", "m"):
                 self.center = self.ds.find_max(("gas", "density"))[1]
             elif center.startswith("max_"):
@@ -831,7 +839,7 @@
             fields_to_get.append(field)
         if len(fields_to_get) == 0 and len(fields_to_generate) == 0:
             return
-        elif self._locked == True:
+        elif self._locked is True:
             raise GenerationInProgress(fields)
         # Track which ones we want in the end
         ofields = set(list(self.field_data.keys())
@@ -1407,7 +1415,7 @@
         with child cells are left untouched.
         """
         for grid in self._grids:
-            if default_value != None:
+            if default_value is not None:
                 grid[field] = np.ones(grid.ActiveDimensions)*default_value
             grid[field][self._get_point_indices(grid)] = value
 
@@ -1474,167 +1482,3 @@
     obj = cls(*new_args)
     obj.field_parameters.update(field_parameters)
     return ReconstructedObject((ds, obj))
-
-class YTBooleanRegionBase(YTSelectionContainer3D):
-    """
-    This will build a hybrid region based on the boolean logic
-    of the regions.
-
-    Parameters
-    ----------
-    regions : list
-        A list of region objects and strings describing the boolean logic
-        to use when building the hybrid region. The boolean logic can be
-        nested using parentheses.
-
-    Examples
-    --------
-    >>> re1 = ds.region([0.5, 0.5, 0.5], [0.4, 0.4, 0.4],
-        [0.6, 0.6, 0.6])
-    >>> re2 = ds.region([0.5, 0.5, 0.5], [0.45, 0.45, 0.45],
-        [0.55, 0.55, 0.55])
-    >>> sp1 = ds.sphere([0.575, 0.575, 0.575], .03)
-    >>> toroid_shape = ds.boolean([re1, "NOT", re2])
-    >>> toroid_shape_with_hole = ds.boolean([re1, "NOT", "(", re2, "OR",
-        sp1, ")"])
-    """
-    _type_name = "boolean"
-    _con_args = ("regions",)
-    def __init__(self, regions, fields = None, ds = None, field_parameters = None, data_source = None):
-        # Center is meaningless, but we'll define it all the same.
-        YTSelectionContainer3D.__init__(self, [0.5]*3, fields, ds, field_parameters, data_source)
-        self.regions = regions
-        self._all_regions = []
-        self._some_overlap = []
-        self._all_overlap = []
-        self._cut_masks = {}
-        self._get_all_regions()
-        self._make_overlaps()
-        self._get_list_of_grids()
-
-    def _get_all_regions(self):
-        # Before anything, we simply find out which regions are involved in all
-        # of this process, uniquely.
-        for item in self.regions:
-            if isinstance(item, bytes): continue
-            self._all_regions.append(item)
-            # So cut_masks don't get messed up.
-            item._boolean_touched = True
-        self._all_regions = np.unique(self._all_regions)
-
-    def _make_overlaps(self):
-        # Using the processed cut_masks, we'll figure out what grids
-        # are left in the hybrid region.
-        pbar = get_pbar("Building boolean", len(self._all_regions))
-        for i, region in enumerate(self._all_regions):
-            try:
-                region._get_list_of_grids() # This is no longer supported.
-                alias = region
-            except AttributeError:
-                alias = region.data         # This is no longer supported.
-            for grid in alias._grids:
-                if grid in self._some_overlap or grid in self._all_overlap:
-                    continue
-                # Get the cut_mask for this grid in this region, and see
-                # if there's any overlap with the overall cut_mask.
-                overall = self._get_cut_mask(grid)
-                local = force_array(alias._get_cut_mask(grid),
-                    grid.ActiveDimensions)
-                # Below we don't want to match empty masks.
-                if overall.sum() == 0 and local.sum() == 0: continue
-                # The whole grid is in the hybrid region if a) its cut_mask
-                # in the original region is identical to the new one and b)
-                # the original region cut_mask is all ones.
-                if (local == np.bitwise_and(overall, local)).all() and \
-                        (local == True).all():
-                    self._all_overlap.append(grid)
-                    continue
-                if (overall == local).any():
-                    # Some of local is in overall
-                    self._some_overlap.append(grid)
-                    continue
-            pbar.update(i)
-        pbar.finish()
-
-    def __repr__(self):
-        # We'll do this the slow way to be clear what's going on
-        s = "%s (%s): " % (self.__class__.__name__, self.ds)
-        s += "["
-        for i, region in enumerate(self.regions):
-            if region in ["OR", "AND", "NOT", "(", ")"]:
-                s += region
-            else:
-                s += region.__repr__()
-            if i < (len(self.regions) - 1): s += ", "
-        s += "]"
-        return s
-
-    def _is_fully_enclosed(self, grid):
-        return (grid in self._all_overlap)
-
-    def _get_list_of_grids(self):
-        self._grids = np.array(self._some_overlap + self._all_overlap,
-            dtype='object')
-
-    def _get_cut_mask(self, grid, field=None):
-        if self._is_fully_enclosed(grid):
-            return True # We do not want child masking here
-        if grid.id in self._cut_masks:
-            return self._cut_masks[grid.id]
-        # If we get this far, we have to generate the cut_mask.
-        return self._get_level_mask(self.regions, grid)
-
-    def _get_level_mask(self, ops, grid):
-        level_masks = []
-        end = 0
-        for i, item in enumerate(ops):
-            if end > 0 and i < end:
-                # We skip over things inside parentheses on this level.
-                continue
-            if isinstance(item, YTDataContainer):
-                # Add this regions cut_mask to level_masks
-                level_masks.append(force_array(item._get_cut_mask(grid),
-                    grid.ActiveDimensions))
-            elif item == "AND" or item == "NOT" or item == "OR":
-                level_masks.append(item)
-            elif item == "(":
-                # recurse down, and we'll append the results, which
-                # should be a single cut_mask
-                open_count = 0
-                for ii, item in enumerate(ops[i + 1:]):
-                    # We look for the matching closing parentheses to find
-                    # where we slice ops.
-                    if item == "(":
-                        open_count += 1
-                    if item == ")" and open_count > 0:
-                        open_count -= 1
-                    elif item == ")" and open_count == 0:
-                        end = i + ii + 1
-                        break
-                level_masks.append(force_array(self._get_level_mask(ops[i + 1:end],
-                    grid), grid.ActiveDimensions))
-                end += 1
-            elif isinstance(item.data, AMRData):
-                level_masks.append(force_array(item.data._get_cut_mask(grid),
-                    grid.ActiveDimensions))
-            else:
-                mylog.error("Item in the boolean construction unidentified.")
-        # Now we do the logic on our level_mask.
-        # There should be no nested logic anymore.
-        # The first item should be a cut_mask,
-        # so that will be our starting point.
-        this_cut_mask = level_masks[0]
-        for i, item in enumerate(level_masks):
-            # I could use a slice above, but I'll keep i consistent instead.
-            if i == 0: continue
-            if item == "AND":
-                # So, the next item in level_masks we want to AND.
-                np.bitwise_and(this_cut_mask, level_masks[i+1], this_cut_mask)
-            if item == "NOT":
-                # It's convenient to remember that NOT == AND NOT
-                np.bitwise_and(this_cut_mask, np.invert(level_masks[i+1]),
-                    this_cut_mask)
-            if item == "OR":
-                np.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
-        self._cut_masks[grid.id] = this_cut_mask
-        return this_cut_mask

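For reference, the heart of the removed class is the infix mask algebra in _get_level_mask: per-grid boolean cut masks combined left to right with AND/OR/NOT tokens, where NOT is applied as AND NOT. A minimal standalone sketch of that evaluation (parenthesis handling omitted; not part of this changeset):

    import numpy as np

    def combine_masks(tokens):
        # tokens alternate mask, operator, mask, ... as in the removed code
        result = tokens[0]
        for i in range(1, len(tokens), 2):
            op, operand = tokens[i], tokens[i + 1]
            if op == "AND":
                result = np.bitwise_and(result, operand)
            elif op == "OR":
                result = np.bitwise_or(result, operand)
            elif op == "NOT":  # NOT == AND NOT, as noted above
                result = np.bitwise_and(result, np.invert(operand))
        return result

    a = np.array([True, True, False, False])
    b = np.array([True, False, True, False])
    print(combine_masks([a, "NOT", b]))  # [False  True False False]
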
diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -17,18 +17,15 @@
 
 import numpy as np
 
-from yt.funcs import *
-
-from yt.config import ytcfg
-from yt.units.yt_array import YTArray, uconcatenate, array_like_field
-from yt.utilities.exceptions import YTFieldNotFound
+from yt.funcs import \
+    camelcase_to_underscore, \
+    ensure_list
+from yt.units.yt_array import array_like_field
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_objects
-from yt.utilities.lib.Octree import Octree
 from yt.utilities.physical_constants import \
-    gravitational_constant_cgs, \
-    HUGE
-from yt.utilities.math_utils import prec_accum
+    gravitational_constant_cgs
+from yt.utilities.physical_ratios import HUGE
 from yt.extern.six import add_metaclass
 
 derived_quantity_registry = {}
@@ -202,7 +199,6 @@
     def __call__(self):
         self.data_source.ds.index
         fi = self.data_source.ds.field_info
-        fields = []
         if ("gas", "cell_mass") in fi:
             gas = super(TotalMass, self).__call__([('gas', 'cell_mass')])
         else:

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -13,25 +13,17 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import pdb
 import weakref
-import itertools
 import numpy as np
 
-from yt.funcs import *
-
 from yt.data_objects.data_containers import \
     YTFieldData, \
-    YTDataContainer, \
     YTSelectionContainer
-from yt.fields.field_exceptions import \
-    NeedsGridType, \
-    NeedsOriginalGrid, \
-    NeedsDataField, \
-    NeedsProperty, \
-    NeedsParameter
 from yt.geometry.selection_routines import convert_mask_to_indices
 import yt.geometry.particle_deposit as particle_deposit
+from yt.utilities.exceptions import \
+    YTFieldTypeNotFound, \
+    YTParticleDepositionNotImplemented
 from yt.utilities.lib.Interpolators import \
     ghost_zone_interpolate
 
@@ -234,15 +226,12 @@
         # We will attempt this by creating a datacube that is exactly bigger
         # than the grid by nZones*dx in each direction
         nl = self.get_global_startindex() - n_zones
-        nr = nl + self.ActiveDimensions + 2 * n_zones
         new_left_edge = nl * self.dds + self.ds.domain_left_edge
-        new_right_edge = nr * self.dds + self.ds.domain_left_edge
 
         # Something different needs to be done for the root grid, though
         level = self.Level
         if all_levels:
             level = self.index.max_level + 1
-        args = (level, new_left_edge, new_right_edge)
         kwargs = {'dims': self.ActiveDimensions + 2*n_zones,
                   'num_ghost_zones':n_zones,
                   'use_pbar':False, 'fields':fields}

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -18,23 +18,20 @@
 
 from yt.data_objects.data_containers import \
     YTFieldData, \
-    YTDataContainer, \
     YTSelectionContainer
-from yt.fields.field_exceptions import \
-    NeedsGridType, \
-    NeedsOriginalGrid, \
-    NeedsDataField, \
-    NeedsProperty, \
-    NeedsParameter
 import yt.geometry.particle_deposit as particle_deposit
 import yt.geometry.particle_smooth as particle_smooth
-from yt.funcs import *
+
+from yt.funcs import mylog
 from yt.utilities.lib.geometry_utils import compute_morton
 from yt.geometry.particle_oct_container import \
     ParticleOctreeContainer
 from yt.units.yt_array import YTArray
 from yt.units.dimensions import length
-from yt.utilities.exceptions import YTInvalidPositionArray
+from yt.utilities.exceptions import \
+    YTInvalidPositionArray, \
+    YTFieldTypeNotFound, \
+    YTParticleDepositionNotImplemented
 
 def cell_count_cache(func):
     def cc_cache_func(self, dobj):

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -14,16 +14,14 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
 import copy
+from collections import defaultdict
 
 from contextlib import contextmanager
-from functools import wraps
 
 from yt.fields.field_info_container import \
     NullFunc, TranslationFunc
 from yt.utilities.exceptions import YTIllDefinedFilter
-from yt.funcs import *
 
 # One to many mapping
 filter_registry = defaultdict(list)

diff -r 2f1010e43b87c85b89ad01865f2469084cd59f9f -r cba5e95e12bdd42c2a6f1b7f540ea524d73785be yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -15,7 +15,11 @@
 
 import numpy as np
 
-from yt.funcs import *
+from collections import defaultdict
+
+from yt.funcs import \
+    ensure_list, \
+    mylog
 from yt.extern.six import add_metaclass
 
 particle_handler_registry = defaultdict()

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/
