[yt-svn] commit/yt: 20 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Mon Apr 17 14:19:01 PDT 2017


20 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/3ef48bc119d1/
Changeset:   3ef48bc119d1
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 17:03:27+00:00
Summary:     Convert yield assert in yt/testing.py
Affected #:  1 file
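
Context for this series: nose runs "yield assert" generator tests by calling
each yielded (callable, args) tuple itself, a feature that newer runners such
as pytest deprecate, so these commits inline the calls. A minimal sketch of
the pattern being applied (the test body is illustrative, not from the
commit):

    from numpy.testing import assert_equal

    # Before: nose-style generator test. nose iterates the generator and
    # invokes assert_equal(2 + 2, 4) on the test's behalf; other runners
    # silently skip the yielded assertion.
    def test_sum_yield_style():
        yield assert_equal, 2 + 2, 4

    # After: the assertion is called directly, which behaves the same
    # under nose and keeps working under pytest.
    def test_sum_direct():
        assert_equal(2 + 2, 4)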

diff -r e265191afc164152ef482e861826d6dc877e6893 -r 3ef48bc119d1c92bc64a2062cc607154dbf1cd26 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -576,7 +576,7 @@
         unit_attr = getattr(ds2, "%s_unit" % u, None)
         if unit_attr is not None:
             attrs2.append(unit_attr)
-    yield assert_equal, attrs1, attrs2
+    assert_equal(attrs1, attrs2)
 
 # This is an export of the 40 grids in IsolatedGalaxy that are of level 4 or
 # lower.  It's just designed to give a sample AMR index to deal with.


https://bitbucket.org/yt_analysis/yt/commits/5e360caa60a5/
Changeset:   5e360caa60a5
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 17:03:54+00:00
Summary:     Convert yt.units tests to not use yield assert

Also eliminate warnings from numpy about invalid operations
Affected #:  2 files
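
The numpy warning cleanup mentioned above relies on the np.errstate context
manager, which appears in the unary_ufunc_comparison hunk below. A minimal
sketch of the idiom (array values are illustrative):

    import numpy as np

    a = np.array([-1.0, 0.0, 4.0])

    # np.sqrt of a negative element normally emits "RuntimeWarning:
    # invalid value encountered in sqrt" while still returning nan for
    # that entry. errstate(invalid='ignore') silences only that class of
    # floating-point warning, and only inside the with block.
    with np.errstate(invalid='ignore'):
        b = np.sqrt(a)  # array([nan, 0., 2.]), no warning emitted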

diff -r 3ef48bc119d1c92bc64a2062cc607154dbf1cd26 -r 5e360caa60a5f430538bc66939788a1c1e58682b yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -65,7 +65,6 @@
                 raise RuntimeError
 
             full_set.add(new_symbol)
-    yield assert_true, True
 
 def test_dimensionless():
     """
@@ -74,20 +73,20 @@
     """
     u1 = Unit()
 
-    yield assert_true, u1.is_dimensionless
-    yield assert_true, u1.expr == 1
-    yield assert_true, u1.base_value == 1
-    yield assert_true, u1.dimensions == 1
+    assert_true(u1.is_dimensionless)
+    assert_true(u1.expr == 1)
+    assert_true(u1.base_value == 1)
+    assert_true(u1.dimensions == 1)
 
     u2 = Unit("")
 
-    yield assert_true, u2.is_dimensionless
-    yield assert_true, u2.expr == 1
-    yield assert_true, u2.base_value == 1
-    yield assert_true, u2.dimensions == 1
+    assert_true(u2.is_dimensionless)
+    assert_true(u2.expr == 1)
+    assert_true(u2.base_value == 1)
+    assert_true(u2.dimensions == 1)
 
-    yield assert_equal, u1.latex_repr, ''
-    yield assert_equal, u2.latex_repr, ''
+    assert_equal(u1.latex_repr, '')
+    assert_equal(u2.latex_repr, '')
 
 #
 # Start init tests
@@ -100,39 +99,38 @@
     """
 
     u1 = Unit("g * cm**2 * s**-2")
-    yield assert_true, u1.dimensions == energy
-    yield assert_true, u1.base_value == 1.0
+    assert_true(u1.dimensions == energy)
+    assert_true(u1.base_value == 1.0)
 
     # make sure order doesn't matter
     u2 = Unit("cm**2 * s**-2 * g")
-    yield assert_true, u2.dimensions == energy
-    yield assert_true, u2.base_value == 1.0
+    assert_true(u2.dimensions == energy)
+    assert_true(u2.base_value == 1.0)
 
     # Test rationals
     u3 = Unit("g**0.5 * cm**-0.5 * s**-1")
-    yield assert_true, u3.dimensions == magnetic_field
-    yield assert_true, u3.base_value == 1.0
+    assert_true(u3.dimensions == magnetic_field)
+    assert_true(u3.base_value == 1.0)
 
     # sqrt functions
     u4 = Unit("sqrt(g)/sqrt(cm)/s")
-    yield assert_true, u4.dimensions == magnetic_field
-    yield assert_true, u4.base_value == 1.0
+    assert_true(u4.dimensions == magnetic_field)
+    assert_true(u4.base_value == 1.0)
 
     # commutative sqrt function
     u5 = Unit("sqrt(g/cm)/s")
-    yield assert_true, u5.dimensions == magnetic_field
-    yield assert_true, u5.base_value == 1.0
+    assert_true(u5.dimensions == magnetic_field)
+    assert_true(u5.base_value == 1.0)
 
     # nonzero CGS conversion factor
     u6 = Unit("Msun/pc**3")
-    yield assert_true, u6.dimensions == mass/length**3
-    yield assert_array_almost_equal_nulp, np.array([u6.base_value]), \
-        np.array([mass_sun_grams/cm_per_pc**3])
+    assert_true(u6.dimensions == mass/length**3)
+    assert_array_almost_equal_nulp(np.array([u6.base_value]), np.array([mass_sun_grams/cm_per_pc**3]))
 
-    yield assert_raises, UnitParseError, Unit, 'm**m'
-    yield assert_raises, UnitParseError, Unit, 'm**g'
-    yield assert_raises, UnitParseError, Unit, 'm+g'
-    yield assert_raises, UnitParseError, Unit, 'm-g'
+    assert_raises(UnitParseError, Unit, 'm**m')
+    assert_raises(UnitParseError, Unit, 'm**g')
+    assert_raises(UnitParseError, Unit, 'm+g')
+    assert_raises(UnitParseError, Unit, 'm-g')
 
 
 def test_create_from_expr():
@@ -156,20 +154,20 @@
     u3 = Unit(s3)
     u4 = Unit(s4)
 
-    yield assert_true, u1.expr == s1
-    yield assert_true, u2.expr == s2
-    yield assert_true, u3.expr == s3
-    yield assert_true, u4.expr == s4
+    assert_true(u1.expr == s1)
+    assert_true(u2.expr == s2)
+    assert_true(u3.expr == s3)
+    assert_true(u4.expr == s4)
 
-    yield assert_allclose_units, u1.base_value, pc_cgs, 1e-12
-    yield assert_allclose_units, u2.base_value, yr_cgs, 1e-12
-    yield assert_allclose_units, u3.base_value, pc_cgs * yr_cgs, 1e-12
-    yield assert_allclose_units, u4.base_value, pc_cgs**2 / yr_cgs, 1e-12
+    assert_allclose_units(u1.base_value, pc_cgs, 1e-12)
+    assert_allclose_units(u2.base_value, yr_cgs, 1e-12)
+    assert_allclose_units(u3.base_value, pc_cgs * yr_cgs, 1e-12)
+    assert_allclose_units(u4.base_value, pc_cgs**2 / yr_cgs, 1e-12)
 
-    yield assert_true, u1.dimensions == length
-    yield assert_true, u2.dimensions == time
-    yield assert_true, u3.dimensions == length * time
-    yield assert_true, u4.dimensions == length**2 / time
+    assert_true(u1.dimensions == length)
+    assert_true(u2.dimensions == time)
+    assert_true(u3.dimensions == length * time)
+    assert_true(u4.dimensions == length**2 / time)
 
 
 def test_create_with_duplicate_dimensions():
@@ -183,11 +181,11 @@
     km_cgs = cm_per_km
     Mpc_cgs = cm_per_mpc
 
-    yield assert_true, u1.base_value == 1
-    yield assert_true, u1.dimensions == power
+    assert_true(u1.base_value == 1)
+    assert_true(u1.dimensions == power)
 
-    yield assert_allclose_units, u2.base_value, km_cgs / Mpc_cgs, 1e-12
-    yield assert_true, u2.dimensions == rate
+    assert_allclose_units(u2.base_value, km_cgs / Mpc_cgs, 1e-12)
+    assert_true(u2.dimensions == rate)
 
 def test_create_new_symbol():
     """
@@ -196,21 +194,21 @@
     """
     u1 = Unit("abc", base_value=42, dimensions=(mass/time))
 
-    yield assert_true, u1.expr == Symbol("abc", positive=True)
-    yield assert_true, u1.base_value == 42
-    yield assert_true, u1.dimensions == mass / time
+    assert_true(u1.expr == Symbol("abc", positive=True))
+    assert_true(u1.base_value == 42)
+    assert_true(u1.dimensions == mass / time)
 
     u1 = Unit("abc", base_value=42, dimensions=length**3)
 
-    yield assert_true, u1.expr == Symbol("abc", positive=True)
-    yield assert_true, u1.base_value == 42
-    yield assert_true, u1.dimensions == length**3
+    assert_true(u1.expr == Symbol("abc", positive=True))
+    assert_true(u1.base_value == 42)
+    assert_true(u1.dimensions == length**3)
 
     u1 = Unit("abc", base_value=42, dimensions=length*(mass*length))
 
-    yield assert_true, u1.expr == Symbol("abc", positive=True)
-    yield assert_true, u1.base_value == 42
-    yield assert_true,  u1.dimensions == length**2*mass
+    assert_true(u1.expr == Symbol("abc", positive=True))
+    assert_true(u1.base_value == 42)
+    assert_true(u1.dimensions == length**2*mass)
 
     assert_raises(UnitParseError, Unit, 'abc', base_value=42,
                   dimensions=length**length)
@@ -229,9 +227,9 @@
     try:
         Unit(Symbol("jigawatts"))
     except UnitParseError:
-        yield assert_true, True
+        assert_true(True)
     else:
-        yield assert_true, False
+        assert_true(False)
 
 def test_create_fail_on_bad_symbol_type():
     """
@@ -241,9 +239,9 @@
     try:
         Unit([1])  # something other than Expr and str
     except UnitParseError:
-        yield assert_true, True
+        assert_true(True)
     else:
-        yield assert_true, False
+        assert_true(False)
 
 def test_create_fail_on_bad_dimensions_type():
     """
@@ -253,9 +251,9 @@
     try:
         Unit("a", base_value=1, dimensions="(mass)")
     except UnitParseError:
-        yield assert_true, True
+        assert_true(True)
     else:
-        yield assert_true, False
+        assert_true(False)
 
 
 def test_create_fail_on_dimensions_content():
@@ -270,7 +268,7 @@
     except UnitParseError:
         pass
     else:
-        yield assert_true, False
+        assert_true(False)
 
 
 def test_create_fail_on_base_value_type():
@@ -281,9 +279,9 @@
     try:
         Unit("a", base_value="a", dimensions=(mass/time))
     except UnitParseError:
-        yield assert_true, True
+        assert_true(True)
     else:
-        yield assert_true, False
+        assert_true(False)
 
 #
 # End init tests
@@ -299,11 +297,11 @@
     speed = pc / Myr
     dimensionless = Unit()
 
-    yield assert_true, str(pc) == "pc"
-    yield assert_true, str(Myr) == "Myr"
-    yield assert_true, str(speed) == "pc/Myr"
-    yield assert_true, repr(speed) == "pc/Myr"
-    yield assert_true, str(dimensionless) == "dimensionless"
+    assert_true(str(pc) == "pc")
+    assert_true(str(Myr) == "Myr")
+    assert_true(str(speed) == "pc/Myr")
+    assert_true(repr(speed) == "pc/Myr")
+    assert_true(str(dimensionless) == "dimensionless")
 
 #
 # Start operation tests
@@ -329,9 +327,9 @@
     # Mul operation
     u3 = u1 * u2
 
-    yield assert_true, u3.expr == msun_sym * pc_sym
-    yield assert_allclose_units, u3.base_value, msun_cgs * pc_cgs, 1e-12
-    yield assert_true, u3.dimensions == mass * length
+    assert_true(u3.expr == msun_sym * pc_sym)
+    assert_allclose_units(u3.base_value, msun_cgs * pc_cgs, 1e-12)
+    assert_true(u3.dimensions == mass * length)
 
     # Pow and Mul operations
     u4 = Unit("pc**2")
@@ -339,9 +337,9 @@
 
     u6 = u4 * u5
 
-    yield assert_true, u6.expr == pc_sym**2 * msun_sym * s_sym
-    yield assert_allclose_units, u6.base_value, pc_cgs**2 * msun_cgs, 1e-12
-    yield assert_true, u6.dimensions == length**2 * mass * time
+    assert_true(u6.expr == pc_sym**2 * msun_sym * s_sym)
+    assert_allclose_units(u6.base_value, pc_cgs**2 * msun_cgs, 1e-12)
+    assert_true(u6.dimensions == length**2 * mass * time)
 
 
 def test_division():
@@ -363,9 +361,9 @@
 
     u3 = u1 / u2
 
-    yield assert_true, u3.expr == pc_sym / (km_sym * s_sym)
-    yield assert_allclose_units, u3.base_value, pc_cgs / km_cgs, 1e-12
-    yield assert_true, u3.dimensions == 1 / time
+    assert_true(u3.expr == pc_sym / (km_sym * s_sym))
+    assert_allclose_units(u3.base_value, pc_cgs / km_cgs, 1e-12)
+    assert_true(u3.dimensions == 1 / time)
 
 
 def test_power():
@@ -382,13 +380,13 @@
 
     u2 = u1**2
 
-    yield assert_true, u2.dimensions == u1_dims**2
-    yield assert_allclose_units, u2.base_value, (pc_cgs**2 * mK_cgs**4)**2, 1e-12
+    assert_true(u2.dimensions == u1_dims**2)
+    assert_allclose_units(u2.base_value, (pc_cgs**2 * mK_cgs**4)**2, 1e-12)
 
     u3 = u1**(-1.0/3)
 
-    yield assert_true, u3.dimensions == nsimplify(u1_dims**(-1.0/3))
-    yield assert_allclose_units, u3.base_value, (pc_cgs**2 * mK_cgs**4)**(-1.0/3), 1e-12
+    assert_true(u3.dimensions == nsimplify(u1_dims**(-1.0/3)))
+    assert_allclose_units(u3.base_value, (pc_cgs**2 * mK_cgs**4)**(-1.0/3), 1e-12)
 
 
 def test_equality():
@@ -399,7 +397,7 @@
     u1 = Unit("km * s**-1")
     u2 = Unit("m * ms**-1")
 
-    yield assert_true, u1 == u2
+    assert_true(u1 == u2)
 
 #
 # End operation tests.
@@ -417,21 +415,20 @@
     u2 = Unit("g * cm**-3")
     u3 = u1.get_base_equivalent()
 
-    yield assert_true, u2.expr == u3.expr
-    yield assert_true, u2 == u3
+    assert_true(u2.expr == u3.expr)
+    assert_true(u2 == u3)
 
-    yield assert_allclose_units, u1.base_value, Msun_cgs / Mpc_cgs**3, 1e-12
-    yield assert_true, u2.base_value == 1
-    yield assert_true, u3.base_value == 1
+    assert_allclose_units(u1.base_value, Msun_cgs / Mpc_cgs**3, 1e-12)
+    assert_true(u2.base_value == 1)
+    assert_true(u3.base_value == 1)
 
     mass_density = mass / length**3
 
-    yield assert_true, u1.dimensions == mass_density
-    yield assert_true, u2.dimensions == mass_density
-    yield assert_true, u3.dimensions == mass_density
+    assert_true(u1.dimensions == mass_density)
+    assert_true(u2.dimensions == mass_density)
+    assert_true(u3.dimensions == mass_density)
 
-    yield assert_allclose_units, get_conversion_factor(u1, u3)[0], \
-        Msun_cgs / Mpc_cgs**3, 1e-12
+    assert_allclose_units(get_conversion_factor(u1, u3)[0], Msun_cgs / Mpc_cgs**3, 1e-12)
 
 def test_is_code_unit():
     ds = fake_random_ds(64, nprocs=1)
@@ -442,12 +439,12 @@
     u5 = Unit('code_mass*g', registry=ds.unit_registry)
     u6 = Unit('g/cm**3')
 
-    yield assert_true, u1.is_code_unit
-    yield assert_true, u2.is_code_unit
-    yield assert_true, u3.is_code_unit
-    yield assert_true, u4.is_code_unit
-    yield assert_true, not u5.is_code_unit
-    yield assert_true, not u6.is_code_unit
+    assert_true(u1.is_code_unit)
+    assert_true(u2.is_code_unit)
+    assert_true(u3.is_code_unit)
+    assert_true(u4.is_code_unit)
+    assert_true(not u5.is_code_unit)
+    assert_true(not u6.is_code_unit)
 
 def test_temperature_offsets():
     u1 = Unit('degC')
@@ -487,22 +484,22 @@
     lat = unit_symbols.lat
     lon = unit_symbols.lon
     deg = unit_symbols.deg
-    yield assert_equal, lat.units.base_offset, 90.0
-    yield assert_equal, (deg*90.0).in_units("lat").value, 0.0
-    yield assert_equal, (deg*180).in_units("lat").value, -90.0
-    yield assert_equal, (lat*0.0).in_units("deg"), deg*90.0
-    yield assert_equal, (lat*-90).in_units("deg"), deg*180
+    assert_equal(lat.units.base_offset, 90.0)
+    assert_equal((deg*90.0).in_units("lat").value, 0.0)
+    assert_equal((deg*180).in_units("lat").value, -90.0)
+    assert_equal((lat*0.0).in_units("deg"), deg*90.0)
+    assert_equal((lat*-90).in_units("deg"), deg*180)
 
-    yield assert_equal, lon.units.base_offset, -180.0
-    yield assert_equal, (deg*0.0).in_units("lon").value, -180.0
-    yield assert_equal, (deg*90.0).in_units("lon").value, -90.0
-    yield assert_equal, (deg*180).in_units("lon").value, 0.0
-    yield assert_equal, (deg*360).in_units("lon").value, 180.0
+    assert_equal(lon.units.base_offset, -180.0)
+    assert_equal((deg*0.0).in_units("lon").value, -180.0)
+    assert_equal((deg*90.0).in_units("lon").value, -90.0)
+    assert_equal((deg*180).in_units("lon").value, 0.0)
+    assert_equal((deg*360).in_units("lon").value, 180.0)
 
-    yield assert_equal, (lon*-180.0).in_units("deg"), deg*0.0
-    yield assert_equal, (lon*-90.0).in_units("deg"), deg*90.0
-    yield assert_equal, (lon*0.0).in_units("deg"), deg*180.0
-    yield assert_equal, (lon*180.0).in_units("deg"), deg*360
+    assert_equal((lon*-180.0).in_units("deg"), deg*0.0)
+    assert_equal((lon*-90.0).in_units("deg"), deg*90.0)
+    assert_equal((lon*0.0).in_units("deg"), deg*180.0)
+    assert_equal((lon*180.0).in_units("deg"), deg*360)
 
 def test_registry_json():
     reg = UnitRegistry()

diff -r 3ef48bc119d1c92bc64a2062cc607154dbf1cd26 -r 5e360caa60a5f430538bc66939788a1c1e58682b yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -68,14 +68,14 @@
     a3 = [4*cm, 5*cm, 6*cm]
     answer = YTArray([5, 7, 9], 'cm')
 
-    yield operate_and_compare, a1, a2, operator.add, answer
-    yield operate_and_compare, a2, a1, operator.add, answer
-    yield operate_and_compare, a1, a3, operator.add, answer
-    yield operate_and_compare, a3, a1, operator.add, answer
-    yield operate_and_compare, a2, a1, np.add, answer
-    yield operate_and_compare, a1, a2, np.add, answer
-    yield operate_and_compare, a1, a3, np.add, answer
-    yield operate_and_compare, a3, a1, np.add, answer
+    operate_and_compare(a1, a2, operator.add, answer)
+    operate_and_compare(a2, a1, operator.add, answer)
+    operate_and_compare(a1, a3, operator.add, answer)
+    operate_and_compare(a3, a1, operator.add, answer)
+    operate_and_compare(a2, a1, np.add, answer)
+    operate_and_compare(a1, a2, np.add, answer)
+    operate_and_compare(a1, a3, np.add, answer)
+    operate_and_compare(a3, a1, np.add, answer)
 
     # different units
     a1 = YTArray([1, 2, 3], 'cm')
@@ -84,12 +84,12 @@
     answer1 = YTArray([401, 502, 603], 'cm')
     answer2 = YTArray([4.01, 5.02, 6.03], 'm')
 
-    yield operate_and_compare, a1, a2, operator.add, answer1
-    yield operate_and_compare, a2, a1, operator.add, answer2
-    yield operate_and_compare, a1, a3, operator.add, answer1
-    yield operate_and_compare, a3, a1, operator.add, answer1
-    yield assert_raises, YTUfuncUnitError, np.add, a1, a2
-    yield assert_raises, YTUfuncUnitError, np.add, a1, a3
+    operate_and_compare(a1, a2, operator.add, answer1)
+    operate_and_compare(a2, a1, operator.add, answer2)
+    operate_and_compare(a1, a3, operator.add, answer1)
+    operate_and_compare(a3, a1, operator.add, answer1)
+    assert_raises(YTUfuncUnitError, np.add, a1, a2)
+    assert_raises(YTUfuncUnitError, np.add, a1, a3)
 
     # Test dimensionless quantities
     a1 = YTArray([1, 2, 3])
@@ -97,21 +97,21 @@
     a3 = [4, 5, 6]
     answer = YTArray([5, 7, 9])
 
-    yield operate_and_compare, a1, a2, operator.add, answer
-    yield operate_and_compare, a2, a1, operator.add, answer
-    yield operate_and_compare, a1, a3, operator.add, answer
-    yield operate_and_compare, a3, a1, operator.add, answer
-    yield operate_and_compare, a1, a2, np.add, answer
-    yield operate_and_compare, a2, a1, np.add, answer
-    yield operate_and_compare, a1, a3, np.add, answer
-    yield operate_and_compare, a3, a1, np.add, answer
+    operate_and_compare(a1, a2, operator.add, answer)
+    operate_and_compare(a2, a1, operator.add, answer)
+    operate_and_compare(a1, a3, operator.add, answer)
+    operate_and_compare(a3, a1, operator.add, answer)
+    operate_and_compare(a1, a2, np.add, answer)
+    operate_and_compare(a2, a1, np.add, answer)
+    operate_and_compare(a1, a3, np.add, answer)
+    operate_and_compare(a3, a1, np.add, answer)
 
     # Catch the different dimensions error
     a1 = YTArray([1, 2, 3], 'm')
     a2 = YTArray([4, 5, 6], 'kg')
 
-    yield assert_raises, YTUnitOperationError, operator.add, a1, a2
-    yield assert_raises, YTUnitOperationError, operator.iadd, a1, a2
+    assert_raises(YTUnitOperationError, operator.add, a1, a2)
+    assert_raises(YTUnitOperationError, operator.iadd, a1, a2)
 
     # adding with zero is allowed irrespective of the units
     zeros = np.zeros(3)
@@ -123,10 +123,10 @@
 
     for op in [operator.add, np.add]:
         for operand in operands:
-            yield operate_and_compare, a1, operand, op, a1
-            yield operate_and_compare, operand, a1, op, a1
-            yield operate_and_compare, 4*m, operand, op, 4*m
-            yield operate_and_compare, operand, 4*m, op, 4*m
+            operate_and_compare(a1, operand, op, a1)
+            operate_and_compare(operand, a1, op, a1)
+            operate_and_compare(4*m, operand, op, 4*m)
+            operate_and_compare(operand, 4*m, op, 4*m)
 
 def test_subtraction():
     """
@@ -141,14 +141,14 @@
     answer1 = YTArray([-3, -3, -3], 'cm')
     answer2 = YTArray([3, 3, 3], 'cm')
 
-    yield operate_and_compare, a1, a2, operator.sub, answer1
-    yield operate_and_compare, a2, a1, operator.sub, answer2
-    yield operate_and_compare, a1, a3, operator.sub, answer1
-    yield operate_and_compare, a3, a1, operator.sub, answer2
-    yield operate_and_compare, a1, a2, np.subtract, answer1
-    yield operate_and_compare, a2, a1, np.subtract, answer2
-    yield operate_and_compare, a1, a3, np.subtract, answer1
-    yield operate_and_compare, a3, a1, np.subtract, answer2
+    operate_and_compare(a1, a2, operator.sub, answer1)
+    operate_and_compare(a2, a1, operator.sub, answer2)
+    operate_and_compare(a1, a3, operator.sub, answer1)
+    operate_and_compare(a3, a1, operator.sub, answer2)
+    operate_and_compare(a1, a2, np.subtract, answer1)
+    operate_and_compare(a2, a1, np.subtract, answer2)
+    operate_and_compare(a1, a3, np.subtract, answer1)
+    operate_and_compare(a3, a1, np.subtract, answer2)
 
     # different units
     a1 = YTArray([1, 2, 3], 'cm')
@@ -158,12 +158,12 @@
     answer2 = YTArray([3.99, 4.98, 5.97], 'm')
     answer3 = YTArray([399, 498, 597], 'cm')
 
-    yield operate_and_compare, a1, a2, operator.sub, answer1
-    yield operate_and_compare, a2, a1, operator.sub, answer2
-    yield operate_and_compare, a1, a3, operator.sub, answer1
-    yield operate_and_compare, a3, a1, operator.sub, answer3
-    yield assert_raises, YTUfuncUnitError, np.subtract, a1, a2
-    yield assert_raises, YTUfuncUnitError, np.subtract, a1, a3
+    operate_and_compare(a1, a2, operator.sub, answer1)
+    operate_and_compare(a2, a1, operator.sub, answer2)
+    operate_and_compare(a1, a3, operator.sub, answer1)
+    operate_and_compare(a3, a1, operator.sub, answer3)
+    assert_raises(YTUfuncUnitError, np.subtract, a1, a2)
+    assert_raises(YTUfuncUnitError, np.subtract, a1, a3)
 
     # Test dimensionless quantities
     a1 = YTArray([1, 2, 3])
@@ -172,21 +172,21 @@
     answer1 = YTArray([-3, -3, -3])
     answer2 = YTArray([3, 3, 3])
 
-    yield operate_and_compare, a1, a2, operator.sub, answer1
-    yield operate_and_compare, a2, a1, operator.sub, answer2
-    yield operate_and_compare, a1, a3, operator.sub, answer1
-    yield operate_and_compare, a3, a1, operator.sub, answer2
-    yield operate_and_compare, a1, a2, np.subtract, answer1
-    yield operate_and_compare, a2, a1, np.subtract, answer2
-    yield operate_and_compare, a1, a3, np.subtract, answer1
-    yield operate_and_compare, a3, a1, np.subtract, answer2
+    operate_and_compare(a1, a2, operator.sub, answer1)
+    operate_and_compare(a2, a1, operator.sub, answer2)
+    operate_and_compare(a1, a3, operator.sub, answer1)
+    operate_and_compare(a3, a1, operator.sub, answer2)
+    operate_and_compare(a1, a2, np.subtract, answer1)
+    operate_and_compare(a2, a1, np.subtract, answer2)
+    operate_and_compare(a1, a3, np.subtract, answer1)
+    operate_and_compare(a3, a1, np.subtract, answer2)
 
     # Catch the different dimensions error
     a1 = YTArray([1, 2, 3], 'm')
     a2 = YTArray([4, 5, 6], 'kg')
 
-    yield assert_raises, YTUnitOperationError, operator.sub, a1, a2
-    yield assert_raises, YTUnitOperationError, operator.isub, a1, a2
+    assert_raises(YTUnitOperationError, operator.sub, a1, a2)
+    assert_raises(YTUnitOperationError, operator.isub, a1, a2)
 
     # subtracting with zero is allowed irrespective of the units
     zeros = np.zeros(3)
@@ -198,10 +198,10 @@
 
     for op in [operator.sub, np.subtract]:
         for operand in operands:
-            yield operate_and_compare, a1, operand, op, a1
-            yield operate_and_compare, operand, a1, op, -a1
-            yield operate_and_compare, 4*m, operand, op, 4*m
-            yield operate_and_compare, operand, 4*m, op, -4*m
+            operate_and_compare(a1, operand, op, a1)
+            operate_and_compare(operand, a1, op, -a1)
+            operate_and_compare(4*m, operand, op, 4*m)
+            operate_and_compare(operand, 4*m, op, -4*m)
 
 def test_multiplication():
     """
@@ -215,14 +215,14 @@
     a3 = [4*cm, 5*cm, 6*cm]
     answer = YTArray([4, 10, 18], 'cm**2')
 
-    yield operate_and_compare, a1, a2, operator.mul, answer
-    yield operate_and_compare, a2, a1, operator.mul, answer
-    yield operate_and_compare, a1, a3, operator.mul, answer
-    yield operate_and_compare, a3, a1, operator.mul, answer
-    yield operate_and_compare, a1, a2, np.multiply, answer
-    yield operate_and_compare, a2, a1, np.multiply, answer
-    yield operate_and_compare, a1, a3, np.multiply, answer
-    yield operate_and_compare, a3, a1, np.multiply, answer
+    operate_and_compare(a1, a2, operator.mul, answer)
+    operate_and_compare(a2, a1, operator.mul, answer)
+    operate_and_compare(a1, a3, operator.mul, answer)
+    operate_and_compare(a3, a1, operator.mul, answer)
+    operate_and_compare(a1, a2, np.multiply, answer)
+    operate_and_compare(a2, a1, np.multiply, answer)
+    operate_and_compare(a1, a3, np.multiply, answer)
+    operate_and_compare(a3, a1, np.multiply, answer)
 
     # different units, same dimension
     a1 = YTArray([1, 2, 3], 'cm')
@@ -232,14 +232,14 @@
     answer2 = YTArray([.04, .10, .18], 'm**2')
     answer3 = YTArray([4, 10, 18], 'cm*m')
 
-    yield operate_and_compare, a1, a2, operator.mul, answer1
-    yield operate_and_compare, a2, a1, operator.mul, answer2
-    yield operate_and_compare, a1, a3, operator.mul, answer1
-    yield operate_and_compare, a3, a1, operator.mul, answer2
-    yield operate_and_compare, a1, a2, np.multiply, answer3
-    yield operate_and_compare, a2, a1, np.multiply, answer3
-    yield operate_and_compare, a1, a3, np.multiply, answer3
-    yield operate_and_compare, a3, a1, np.multiply, answer3
+    operate_and_compare(a1, a2, operator.mul, answer1)
+    operate_and_compare(a2, a1, operator.mul, answer2)
+    operate_and_compare(a1, a3, operator.mul, answer1)
+    operate_and_compare(a3, a1, operator.mul, answer2)
+    operate_and_compare(a1, a2, np.multiply, answer3)
+    operate_and_compare(a2, a1, np.multiply, answer3)
+    operate_and_compare(a1, a3, np.multiply, answer3)
+    operate_and_compare(a3, a1, np.multiply, answer3)
 
     # different dimensions
     a1 = YTArray([1, 2, 3], 'cm')
@@ -247,14 +247,14 @@
     a3 = [4*g, 5*g, 6*g]
     answer = YTArray([4, 10, 18], 'cm*g')
 
-    yield operate_and_compare, a1, a2, operator.mul, answer
-    yield operate_and_compare, a2, a1, operator.mul, answer
-    yield operate_and_compare, a1, a3, operator.mul, answer
-    yield operate_and_compare, a3, a1, operator.mul, answer
-    yield operate_and_compare, a1, a2, np.multiply, answer
-    yield operate_and_compare, a2, a1, np.multiply, answer
-    yield operate_and_compare, a1, a3, np.multiply, answer
-    yield operate_and_compare, a3, a1, np.multiply, answer
+    operate_and_compare(a1, a2, operator.mul, answer)
+    operate_and_compare(a2, a1, operator.mul, answer)
+    operate_and_compare(a1, a3, operator.mul, answer)
+    operate_and_compare(a3, a1, operator.mul, answer)
+    operate_and_compare(a1, a2, np.multiply, answer)
+    operate_and_compare(a2, a1, np.multiply, answer)
+    operate_and_compare(a1, a3, np.multiply, answer)
+    operate_and_compare(a3, a1, np.multiply, answer)
 
     # One dimensionless, one unitful
     a1 = YTArray([1, 2, 3], 'cm')
@@ -262,14 +262,14 @@
     a3 = [4, 5, 6]
     answer = YTArray([4, 10, 18], 'cm')
 
-    yield operate_and_compare, a1, a2, operator.mul, answer
-    yield operate_and_compare, a2, a1, operator.mul, answer
-    yield operate_and_compare, a1, a3, operator.mul, answer
-    yield operate_and_compare, a3, a1, operator.mul, answer
-    yield operate_and_compare, a1, a2, np.multiply, answer
-    yield operate_and_compare, a2, a1, np.multiply, answer
-    yield operate_and_compare, a1, a3, np.multiply, answer
-    yield operate_and_compare, a3, a1, np.multiply, answer
+    operate_and_compare(a1, a2, operator.mul, answer)
+    operate_and_compare(a2, a1, operator.mul, answer)
+    operate_and_compare(a1, a3, operator.mul, answer)
+    operate_and_compare(a3, a1, operator.mul, answer)
+    operate_and_compare(a1, a2, np.multiply, answer)
+    operate_and_compare(a2, a1, np.multiply, answer)
+    operate_and_compare(a1, a3, np.multiply, answer)
+    operate_and_compare(a3, a1, np.multiply, answer)
 
     # Both dimensionless quantities
     a1 = YTArray([1, 2, 3])
@@ -277,14 +277,14 @@
     a3 = [4, 5, 6]
     answer = YTArray([4, 10, 18])
 
-    yield operate_and_compare, a1, a2, operator.mul, answer
-    yield operate_and_compare, a2, a1, operator.mul, answer
-    yield operate_and_compare, a1, a3, operator.mul, answer
-    yield operate_and_compare, a3, a1, operator.mul, answer
-    yield operate_and_compare, a1, a2, np.multiply, answer
-    yield operate_and_compare, a2, a1, np.multiply, answer
-    yield operate_and_compare, a1, a3, np.multiply, answer
-    yield operate_and_compare, a3, a1, np.multiply, answer
+    operate_and_compare(a1, a2, operator.mul, answer)
+    operate_and_compare(a2, a1, operator.mul, answer)
+    operate_and_compare(a1, a3, operator.mul, answer)
+    operate_and_compare(a3, a1, operator.mul, answer)
+    operate_and_compare(a1, a2, np.multiply, answer)
+    operate_and_compare(a2, a1, np.multiply, answer)
+    operate_and_compare(a1, a3, np.multiply, answer)
+    operate_and_compare(a3, a1, np.multiply, answer)
 
 
 def test_division():
@@ -304,14 +304,14 @@
     else:
         op = operator.truediv
 
-    yield operate_and_compare, a1, a2, op, answer1
-    yield operate_and_compare, a2, a1, op, answer2
-    yield operate_and_compare, a1, a3, op, answer1
-    yield operate_and_compare, a3, a1, op, answer2
-    yield operate_and_compare, a1, a2, np.divide, answer1
-    yield operate_and_compare, a2, a1, np.divide, answer2
-    yield operate_and_compare, a1, a3, np.divide, answer1
-    yield operate_and_compare, a3, a1, np.divide, answer2
+    operate_and_compare(a1, a2, op, answer1)
+    operate_and_compare(a2, a1, op, answer2)
+    operate_and_compare(a1, a3, op, answer1)
+    operate_and_compare(a3, a1, op, answer2)
+    operate_and_compare(a1, a2, np.divide, answer1)
+    operate_and_compare(a2, a1, np.divide, answer2)
+    operate_and_compare(a1, a3, np.divide, answer1)
+    operate_and_compare(a3, a1, np.divide, answer2)
 
     # different units, same dimension
     a1 = YTArray([1., 2., 3.], 'cm')
@@ -322,14 +322,14 @@
     answer3 = YTArray([0.25, 0.4, 0.5], 'cm/m')
     answer4 = YTArray([4.0, 2.5, 2.0], 'm/cm')
 
-    yield operate_and_compare, a1, a2, op, answer1
-    yield operate_and_compare, a2, a1, op, answer2
-    yield operate_and_compare, a1, a3, op, answer1
-    yield operate_and_compare, a3, a1, op, answer2
-    yield operate_and_compare, a1, a2, np.divide, answer3
-    yield operate_and_compare, a2, a1, np.divide, answer4
-    yield operate_and_compare, a1, a3, np.divide, answer3
-    yield operate_and_compare, a3, a1, np.divide, answer4
+    operate_and_compare(a1, a2, op, answer1)
+    operate_and_compare(a2, a1, op, answer2)
+    operate_and_compare(a1, a3, op, answer1)
+    operate_and_compare(a3, a1, op, answer2)
+    operate_and_compare(a1, a2, np.divide, answer3)
+    operate_and_compare(a2, a1, np.divide, answer4)
+    operate_and_compare(a1, a3, np.divide, answer3)
+    operate_and_compare(a3, a1, np.divide, answer4)
 
     # different dimensions
     a1 = YTArray([1., 2., 3.], 'cm')
@@ -338,14 +338,14 @@
     answer1 = YTArray([0.25, 0.4, 0.5], 'cm/g')
     answer2 = YTArray([4, 2.5, 2], 'g/cm')
 
-    yield operate_and_compare, a1, a2, op, answer1
-    yield operate_and_compare, a2, a1, op, answer2
-    yield operate_and_compare, a1, a3, op, answer1
-    yield operate_and_compare, a3, a1, op, answer2
-    yield operate_and_compare, a1, a2, np.divide, answer1
-    yield operate_and_compare, a2, a1, np.divide, answer2
-    yield operate_and_compare, a1, a3, np.divide, answer1
-    yield operate_and_compare, a3, a1, np.divide, answer2
+    operate_and_compare(a1, a2, op, answer1)
+    operate_and_compare(a2, a1, op, answer2)
+    operate_and_compare(a1, a3, op, answer1)
+    operate_and_compare(a3, a1, op, answer2)
+    operate_and_compare(a1, a2, np.divide, answer1)
+    operate_and_compare(a2, a1, np.divide, answer2)
+    operate_and_compare(a1, a3, np.divide, answer1)
+    operate_and_compare(a3, a1, np.divide, answer2)
 
     # One dimensionless, one unitful
     a1 = YTArray([1., 2., 3.], 'cm')
@@ -354,14 +354,14 @@
     answer1 = YTArray([0.25, 0.4, 0.5], 'cm')
     answer2 = YTArray([4, 2.5, 2], '1/cm')
 
-    yield operate_and_compare, a1, a2, op, answer1
-    yield operate_and_compare, a2, a1, op, answer2
-    yield operate_and_compare, a1, a3, op, answer1
-    yield operate_and_compare, a3, a1, op, answer2
-    yield operate_and_compare, a1, a2, np.divide, answer1
-    yield operate_and_compare, a2, a1, np.divide, answer2
-    yield operate_and_compare, a1, a3, np.divide, answer1
-    yield operate_and_compare, a3, a1, np.divide, answer2
+    operate_and_compare(a1, a2, op, answer1)
+    operate_and_compare(a2, a1, op, answer2)
+    operate_and_compare(a1, a3, op, answer1)
+    operate_and_compare(a3, a1, op, answer2)
+    operate_and_compare(a1, a2, np.divide, answer1)
+    operate_and_compare(a2, a1, np.divide, answer2)
+    operate_and_compare(a1, a3, np.divide, answer1)
+    operate_and_compare(a3, a1, np.divide, answer2)
 
     # Both dimensionless quantities
     a1 = YTArray([1., 2., 3.])
@@ -370,14 +370,14 @@
     answer1 = YTArray([0.25, 0.4, 0.5])
     answer2 = YTArray([4, 2.5, 2])
 
-    yield operate_and_compare, a1, a2, op, answer1
-    yield operate_and_compare, a2, a1, op, answer2
-    yield operate_and_compare, a1, a3, op, answer1
-    yield operate_and_compare, a3, a1, op, answer2
-    yield operate_and_compare, a1, a3, np.divide, answer1
-    yield operate_and_compare, a3, a1, np.divide, answer2
-    yield operate_and_compare, a1, a3, np.divide, answer1
-    yield operate_and_compare, a3, a1, np.divide, answer2
+    operate_and_compare(a1, a2, op, answer1)
+    operate_and_compare(a2, a1, op, answer2)
+    operate_and_compare(a1, a3, op, answer1)
+    operate_and_compare(a3, a1, op, answer2)
+    operate_and_compare(a1, a3, np.divide, answer1)
+    operate_and_compare(a3, a1, np.divide, answer2)
+    operate_and_compare(a1, a3, np.divide, answer1)
+    operate_and_compare(a3, a1, np.divide, answer2)
 
 
 def test_power():
@@ -432,23 +432,23 @@
     )
 
     for op, answer in zip(ops, answers):
-        yield operate_and_compare, a1, a2, op, answer
+        operate_and_compare(a1, a2, op, answer)
     for op, answer in zip(ops, answers):
-        yield operate_and_compare, a1, dimless, op, answer
+        operate_and_compare(a1, dimless, op, answer)
 
     for op in ops:
-        yield assert_raises, YTUfuncUnitError, op, a1, a3
+        assert_raises(YTUfuncUnitError, op, a1, a3)
 
     for op, answer in zip(ops, answers):
-        yield operate_and_compare, a1, a3.in_units('cm'), op, answer
+        operate_and_compare(a1, a3.in_units('cm'), op, answer)
     
     # Check that comparisons with dimensionless quantities work in both directions.
-    yield operate_and_compare, a3, dimless, np.less, [True, True, True]
-    yield operate_and_compare, dimless, a3, np.less, [False, False, False]
-    yield assert_equal, a1 < 2, [True, False, False]
-    yield assert_equal, a1 < 2, np.less(a1, 2)
-    yield assert_equal, 2 < a1, [False, False, True]
-    yield assert_equal, 2 < a1, np.less(2, a1)
+    operate_and_compare(a3, dimless, np.less, [True, True, True])
+    operate_and_compare(dimless, a3, np.less, [False, False, False])
+    assert_equal(a1 < 2, [True, False, False])
+    assert_equal(a1 < 2, np.less(a1, 2))
+    assert_equal(2 < a1, [False, False, True])
+    assert_equal(2 < a1, np.less(2, a1))
 
 
 def test_unit_conversions():
@@ -464,60 +464,60 @@
     cm_unit = Unit('cm')
     kpc_unit = Unit('kpc')
 
-    yield assert_equal, km_in_cm, km
-    yield assert_equal, km_in_cm.in_cgs(), 1e5
-    yield assert_equal, km_in_cm.in_mks(), 1e3
-    yield assert_equal, km_in_cm.units, cm_unit
+    assert_equal(km_in_cm, km)
+    assert_equal(km_in_cm.in_cgs(), 1e5)
+    assert_equal(km_in_cm.in_mks(), 1e3)
+    assert_equal(km_in_cm.units, cm_unit)
 
     km_view = km.ndarray_view()
     km.convert_to_units('cm')
     assert_true(km_view.base is km.base)
 
-    yield assert_equal, km, YTQuantity(1, 'km')
-    yield assert_equal, km.in_cgs(), 1e5
-    yield assert_equal, km.in_mks(), 1e3
-    yield assert_equal, km.units, cm_unit
+    assert_equal(km, YTQuantity(1, 'km'))
+    assert_equal(km.in_cgs(), 1e5)
+    assert_equal(km.in_mks(), 1e3)
+    assert_equal(km.units, cm_unit)
 
     km.convert_to_units('kpc')
     assert_true(km_view.base is km.base)
 
-    yield assert_array_almost_equal_nulp, km, YTQuantity(1, 'km')
-    yield assert_array_almost_equal_nulp, km.in_cgs(), YTQuantity(1e5, 'cm')
-    yield assert_array_almost_equal_nulp, km.in_mks(), YTQuantity(1e3, 'm')
-    yield assert_equal, km.units, kpc_unit
+    assert_array_almost_equal_nulp(km, YTQuantity(1, 'km'))
+    assert_array_almost_equal_nulp(km.in_cgs(), YTQuantity(1e5, 'cm'))
+    assert_array_almost_equal_nulp(km.in_mks(), YTQuantity(1e3, 'm'))
+    assert_equal(km.units, kpc_unit)
 
-    yield assert_isinstance, km.to_ndarray(), np.ndarray
-    yield assert_isinstance, km.ndarray_view(), np.ndarray
+    assert_isinstance(km.to_ndarray(), np.ndarray)
+    assert_isinstance(km.ndarray_view(), np.ndarray)
 
     dyne = YTQuantity(1.0, 'dyne')
 
-    yield assert_equal, dyne.in_cgs(), dyne
-    yield assert_equal, dyne.in_cgs(), 1.0
-    yield assert_equal, dyne.in_mks(), dyne
-    yield assert_equal, dyne.in_mks(), 1e-5
-    yield assert_equal, str(dyne.in_mks().units), 'kg*m/s**2'
-    yield assert_equal, str(dyne.in_cgs().units), 'cm*g/s**2'
+    assert_equal(dyne.in_cgs(), dyne)
+    assert_equal(dyne.in_cgs(), 1.0)
+    assert_equal(dyne.in_mks(), dyne)
+    assert_equal(dyne.in_mks(), 1e-5)
+    assert_equal(str(dyne.in_mks().units), 'kg*m/s**2')
+    assert_equal(str(dyne.in_cgs().units), 'cm*g/s**2')
 
     em3 = YTQuantity(1.0, 'erg/m**3')
 
-    yield assert_equal, em3.in_cgs(), em3
-    yield assert_equal, em3.in_cgs(), 1e-6
-    yield assert_equal, em3.in_mks(), em3
-    yield assert_equal, em3.in_mks(), 1e-7
-    yield assert_equal, str(em3.in_mks().units), 'kg/(m*s**2)'
-    yield assert_equal, str(em3.in_cgs().units), 'g/(cm*s**2)'
+    assert_equal(em3.in_cgs(), em3)
+    assert_equal(em3.in_cgs(), 1e-6)
+    assert_equal(em3.in_mks(), em3)
+    assert_equal(em3.in_mks(), 1e-7)
+    assert_equal(str(em3.in_mks().units), 'kg/(m*s**2)')
+    assert_equal(str(em3.in_cgs().units), 'g/(cm*s**2)')
 
     em3_converted = YTQuantity(1545436840.386756, 'Msun/(Myr**2*kpc)')
-    yield assert_equal, em3.in_base(unit_system="galactic"), em3
-    yield assert_array_almost_equal, em3.in_base(unit_system="galactic"), em3_converted
-    yield assert_equal, str(em3.in_base(unit_system="galactic").units), 'Msun/(Myr**2*kpc)'
+    assert_equal(em3.in_base(unit_system="galactic"), em3)
+    assert_array_almost_equal(em3.in_base(unit_system="galactic"), em3_converted)
+    assert_equal(str(em3.in_base(unit_system="galactic").units), 'Msun/(Myr**2*kpc)')
 
     dimless = YTQuantity(1.0, "")
-    yield assert_equal, dimless.in_cgs(), dimless
-    yield assert_equal, dimless.in_cgs(), 1.0
-    yield assert_equal, dimless.in_mks(), dimless
-    yield assert_equal, dimless.in_mks(), 1.0
-    yield assert_equal, str(dimless.in_cgs().units), "dimensionless"
+    assert_equal(dimless.in_cgs(), dimless)
+    assert_equal(dimless.in_cgs(), 1.0)
+    assert_equal(dimless.in_mks(), dimless)
+    assert_equal(dimless.in_mks(), 1.0)
+    assert_equal(str(dimless.in_cgs().units), "dimensionless")
 
 def test_temperature_conversions():
     """
@@ -542,32 +542,32 @@
     balmy_view = balmy.ndarray_view()
 
     balmy.convert_to_units('degF')
-    yield assert_true, balmy_view.base is balmy.base
-    yield assert_array_almost_equal, np.array(balmy), np.array(balmy_F)
+    assert_true(balmy_view.base is balmy.base)
+    assert_array_almost_equal(np.array(balmy), np.array(balmy_F))
 
     balmy.convert_to_units('degC')
-    yield assert_true, balmy_view.base is balmy.base
-    yield assert_array_almost_equal, np.array(balmy), np.array(balmy_C)
+    assert_true(balmy_view.base is balmy.base)
+    assert_array_almost_equal(np.array(balmy), np.array(balmy_C))
 
     balmy.convert_to_units('R')
-    yield assert_true, balmy_view.base is balmy.base
-    yield assert_array_almost_equal, np.array(balmy), np.array(balmy_R)
+    assert_true(balmy_view.base is balmy.base)
+    assert_array_almost_equal(np.array(balmy), np.array(balmy_R))
 
     balmy.convert_to_units('degF')
-    yield assert_true, balmy_view.base is balmy.base
-    yield assert_array_almost_equal, np.array(balmy), np.array(balmy_F)
+    assert_true(balmy_view.base is balmy.base)
+    assert_array_almost_equal(np.array(balmy), np.array(balmy_F))
 
-    yield assert_raises, InvalidUnitOperation, np.multiply, balmy, km
+    assert_raises(InvalidUnitOperation, np.multiply, balmy, km)
 
     # Does CGS conversion from F to K work?
-    yield assert_array_almost_equal, balmy.in_cgs(), YTQuantity(300, 'K')
+    assert_array_almost_equal(balmy.in_cgs(), YTQuantity(300, 'K'))
 
 
 def test_yt_array_yt_quantity_ops():
     """
     Test operations that combine YTArray and YTQuantity
     """
-    a = YTArray(range(10), 'cm')
+    a = YTArray(range(1, 10), 'cm')
     b = YTQuantity(5, 'g')
 
     assert_isinstance(a*b, YTArray)
@@ -595,16 +595,15 @@
     a_boolean_index = a[a > 5]
     a_selection = a[0]
 
-    yield assert_array_equal, a_slice, YTArray([0, 1, 2], 'cm')
-    yield assert_array_equal, a_fancy_index, YTArray([1, 1, 3, 5], 'cm')
-    yield assert_array_equal, a_array_fancy_index, \
-        YTArray([[1, 1, ], [3, 5]], 'cm')
-    yield assert_array_equal, a_boolean_index, YTArray([6, 7, 8, 9], 'cm')
-    yield assert_isinstance, a_selection, YTQuantity
+    assert_array_equal(a_slice, YTArray([0, 1, 2], 'cm'))
+    assert_array_equal(a_fancy_index, YTArray([1, 1, 3, 5], 'cm'))
+    assert_array_equal(a_array_fancy_index, YTArray([[1, 1, ], [3, 5]], 'cm'))
+    assert_array_equal(a_boolean_index, YTArray([6, 7, 8, 9], 'cm'))
+    assert_isinstance(a_selection, YTQuantity)
 
     # .base points to the original array for a numpy view.  If it is not a
     # view, .base is None.
-    yield assert_true, a_slice.base is a
+    assert_true(a_slice.base is a)
 
 
 def test_iteration():
@@ -614,8 +613,8 @@
     a = np.arange(3)
     b = YTArray(np.arange(3), 'cm')
     for ia, ib, in zip(a, b):
-        yield assert_equal, ia, ib.value
-        yield assert_equal, ib.units, b.units
+        assert_equal(ia, ib.value)
+        assert_equal(ib.units, b.units)
 
 
 def test_fix_length():
@@ -625,7 +624,7 @@
     ds = fake_random_ds(64, nprocs=1, length_unit=10)
     length = ds.quan(1.0, 'code_length')
     new_length = fix_length(length, ds=ds)
-    yield assert_equal, YTQuantity(10, 'cm'), new_length
+    assert_equal(YTQuantity(10, 'cm'), new_length)
 
 def test_code_unit_combinations():
     """
@@ -665,29 +664,27 @@
             loaded_data = pickle.load(fname)
         os.unlink(tempf.name)
 
-        yield assert_array_equal, data, loaded_data
-        yield assert_equal, data.units, loaded_data.units
-        yield assert_array_equal, array(data.in_cgs()), \
-            array(loaded_data.in_cgs())
-        yield assert_equal, float(data.units.base_value), \
-            float(loaded_data.units.base_value)
+        assert_array_equal(data, loaded_data)
+        assert_equal(data.units, loaded_data.units)
+        assert_array_equal(array(data.in_cgs()), array(loaded_data.in_cgs()))
+        assert_equal(float(data.units.base_value), float(loaded_data.units.base_value))
 
 
 def test_copy():
     quan = YTQuantity(1, 'g')
     arr = YTArray([1, 2, 3], 'cm')
 
-    yield assert_equal, copy.copy(quan), quan
-    yield assert_array_equal, copy.copy(arr), arr
+    assert_equal(copy.copy(quan), quan)
+    assert_array_equal(copy.copy(arr), arr)
 
-    yield assert_equal,  copy.deepcopy(quan), quan
-    yield assert_array_equal, copy.deepcopy(arr), arr
+    assert_equal(copy.deepcopy(quan), quan)
+    assert_array_equal(copy.deepcopy(arr), arr)
 
-    yield assert_equal, quan.copy(), quan
-    yield assert_array_equal, arr.copy(), arr
+    assert_equal(quan.copy(), quan)
+    assert_array_equal(arr.copy(), arr)
 
-    yield assert_equal, np.copy(quan), quan
-    yield assert_array_equal, np.copy(arr), arr
+    assert_equal(np.copy(quan), quan)
+    assert_array_equal(np.copy(arr), arr)
 
 
 def unary_ufunc_comparison(ufunc, a):
@@ -706,18 +703,18 @@
                    np.isfinite, np.isinf, np.isnan, np.signbit, np.sign,
                    np.rint, np.logical_not):
         # These operations should return identical results compared to numpy.
+        with np.errstate(invalid='ignore'):
+            try:
+                ret = ufunc(a, out=out)
+            except YTUnitOperationError:
+                assert_true(ufunc in (np.deg2rad, np.rad2deg))
+                ret = ufunc(YTArray(a, '1'))
 
-        try:
-            ret = ufunc(a, out=out)
-        except YTUnitOperationError:
-            assert_true(ufunc in (np.deg2rad, np.rad2deg))
-            ret = ufunc(YTArray(a, '1'))
-
-        assert_array_equal(ret, out)
-        assert_array_equal(ret, ufunc(a_array))
-        # In-place copies do not drop units.
-        assert_true(hasattr(out, 'units'))
-        assert_true(not hasattr(ret, 'units'))
+            assert_array_equal(ret, out)
+            assert_array_equal(ret, ufunc(a_array))
+            # In-place copies do not drop units.
+            assert_true(hasattr(out, 'units'))
+            assert_true(not hasattr(ret, 'units'))
     elif ufunc in (np.absolute, np.fabs, np.conjugate, np.floor, np.ceil,
                    np.trunc, np.negative, np.spacing):
         ret = ufunc(a, out=out)
@@ -729,10 +726,12 @@
         if ufunc is np.ones_like:
             ret = ufunc(a)
         else:
-            ret = ufunc(a, out=out)
+            with np.errstate(invalid='ignore'):
+                ret = ufunc(a, out=out)
             assert_array_equal(ret, out)
-
-        assert_array_equal(ret.to_ndarray(), ufunc(a_array))
+    
+        with np.errstate(invalid='ignore'):
+            assert_array_equal(ret.to_ndarray(), ufunc(a_array))
         if ufunc is np.square:
             assert_true(out.units == a.units**2)
             assert_true(ret.units == a.units**2)
@@ -801,9 +800,9 @@
 
 def test_ufuncs():
     for ufunc in unary_operators:
-        yield unary_ufunc_comparison, ufunc, YTArray([.3, .4, .5], 'cm')
-        yield unary_ufunc_comparison, ufunc, YTArray([12, 23, 47], 'g')
-        yield unary_ufunc_comparison, ufunc, YTArray([2, 4, -6], 'erg/m**3')
+        unary_ufunc_comparison(ufunc, YTArray([.3, .4, .5], 'cm'))
+        unary_ufunc_comparison(ufunc, YTArray([12, 23, 47], 'g'))
+        unary_ufunc_comparison(ufunc, YTArray([2, 4, -6], 'erg/m**3'))
 
     for ufunc in binary_operators:
 
@@ -814,8 +813,8 @@
             b = YTArray([.1, .2, .3], 'dimensionless')
             c = np.array(b)
             d = YTArray([1., 2., 3.], 'g')
-            yield binary_ufunc_comparison, ufunc, a, b
-            yield binary_ufunc_comparison, ufunc, a, c
+            binary_ufunc_comparison(ufunc, a, b)
+            binary_ufunc_comparison(ufunc, a, c)
             assert_raises(YTUnitOperationError, ufunc, a, d)
             continue
 
@@ -826,30 +825,30 @@
         e = YTArray([.1, .2, .3], 'erg/m**3')
 
         for pair in itertools.product([a, b, c, d, e], repeat=2):
-            yield binary_ufunc_comparison, ufunc, pair[0], pair[1]
+            binary_ufunc_comparison(ufunc, pair[0], pair[1])
 
 
 def test_convenience():
 
     arr = YTArray([1, 2, 3], 'cm')
 
-    yield assert_equal, arr.unit_quantity, YTQuantity(1, 'cm')
-    yield assert_equal, arr.uq, YTQuantity(1, 'cm')
-    yield assert_isinstance, arr.unit_quantity, YTQuantity
-    yield assert_isinstance, arr.uq, YTQuantity
+    assert_equal(arr.unit_quantity, YTQuantity(1, 'cm'))
+    assert_equal(arr.uq, YTQuantity(1, 'cm'))
+    assert_isinstance(arr.unit_quantity, YTQuantity)
+    assert_isinstance(arr.uq, YTQuantity)
 
-    yield assert_array_equal, arr.unit_array, YTArray(np.ones_like(arr), 'cm')
-    yield assert_array_equal, arr.ua, YTArray(np.ones_like(arr), 'cm')
-    yield assert_isinstance, arr.unit_array, YTArray
-    yield assert_isinstance, arr.ua, YTArray
+    assert_array_equal(arr.unit_array, YTArray(np.ones_like(arr), 'cm'))
+    assert_array_equal(arr.ua, YTArray(np.ones_like(arr), 'cm'))
+    assert_isinstance(arr.unit_array, YTArray)
+    assert_isinstance(arr.ua, YTArray)
 
-    yield assert_array_equal, arr.ndview, arr.view(np.ndarray)
-    yield assert_array_equal, arr.d, arr.view(np.ndarray)
-    yield assert_true, arr.ndview.base is arr.base
-    yield assert_true, arr.d.base is arr.base
+    assert_array_equal(arr.ndview, arr.view(np.ndarray))
+    assert_array_equal(arr.d, arr.view(np.ndarray))
+    assert_true(arr.ndview.base is arr.base)
+    assert_true(arr.d.base is arr.base)
 
-    yield assert_array_equal, arr.value, np.array(arr)
-    yield assert_array_equal, arr.v, np.array(arr)
+    assert_array_equal(arr.value, np.array(arr))
+    assert_array_equal(arr.v, np.array(arr))
 
 
 def test_registry_association():
@@ -859,7 +858,7 @@
     c = ds.quan(6, '')
     d = 5
 
-    yield assert_equal, id(a.units.registry), id(ds.unit_registry)
+    assert_equal(id(a.units.registry), id(ds.unit_registry))
 
     def binary_op_registry_comparison(op):
         e = op(a, b)
@@ -884,10 +883,10 @@
     if hasattr(operator, "div"):
         binary_ops.append(operator.div)
     for op in binary_ops:
-        yield binary_op_registry_comparison, op
+        binary_op_registry_comparison(op)
 
     for op in [operator.abs, operator.neg, operator.pos]:
-        yield unary_op_registry_comparison, op
+        unary_op_registry_comparison(op)
 
 @requires_module("astropy")
 def test_astropy():
@@ -901,16 +900,16 @@
     yt_quan = YTQuantity(10., "sqrt(Msun)/kpc**3")
     yt_quan2 = YTQuantity.from_astropy(ap_quan)
 
-    yield assert_array_equal, ap_arr, yt_arr.to_astropy()
-    yield assert_array_equal, yt_arr, YTArray.from_astropy(ap_arr)
-    yield assert_array_equal, yt_arr, yt_arr2
+    assert_array_equal(ap_arr, yt_arr.to_astropy())
+    assert_array_equal(yt_arr, YTArray.from_astropy(ap_arr))
+    assert_array_equal(yt_arr, yt_arr2)
 
-    yield assert_equal, ap_quan, yt_quan.to_astropy()
-    yield assert_equal, yt_quan, YTQuantity.from_astropy(ap_quan)
-    yield assert_equal, yt_quan, yt_quan2
+    assert_equal(ap_quan, yt_quan.to_astropy())
+    assert_equal(yt_quan, YTQuantity.from_astropy(ap_quan))
+    assert_equal(yt_quan, yt_quan2)
 
-    yield assert_array_equal, yt_arr, YTArray.from_astropy(yt_arr.to_astropy())
-    yield assert_equal, yt_quan, YTQuantity.from_astropy(yt_quan.to_astropy())
+    assert_array_equal(yt_arr, YTArray.from_astropy(yt_arr.to_astropy()))
+    assert_equal(yt_quan, YTQuantity.from_astropy(yt_quan.to_astropy()))
 
 @requires_module("pint")
 def test_pint():
@@ -926,18 +925,18 @@
     yt_quan = YTQuantity(10., "sqrt(g)/mm**3")
     yt_quan2 = YTQuantity.from_pint(p_quan)
 
-    yield assert_array_equal, p_arr, yt_arr.to_pint()
+    assert_array_equal(p_arr, yt_arr.to_pint())
     assert_equal(p_quan, yt_quan.to_pint())
-    yield assert_array_equal, yt_arr, YTArray.from_pint(p_arr)
-    yield assert_array_equal, yt_arr, yt_arr2
+    assert_array_equal(yt_arr, YTArray.from_pint(p_arr))
+    assert_array_equal(yt_arr, yt_arr2)
 
-    yield assert_equal, p_quan.magnitude, yt_quan.to_pint().magnitude
+    assert_equal(p_quan.magnitude, yt_quan.to_pint().magnitude)
     assert_equal(p_quan, yt_quan.to_pint())
-    yield assert_equal, yt_quan, YTQuantity.from_pint(p_quan)
-    yield assert_equal, yt_quan, yt_quan2
+    assert_equal(yt_quan, YTQuantity.from_pint(p_quan))
+    assert_equal(yt_quan, yt_quan2)
 
-    yield assert_array_equal, yt_arr, YTArray.from_pint(yt_arr.to_pint())
-    yield assert_equal, yt_quan, YTQuantity.from_pint(yt_quan.to_pint())
+    assert_array_equal(yt_arr, YTArray.from_pint(yt_arr.to_pint()))
+    assert_equal(yt_quan, YTQuantity.from_pint(yt_quan.to_pint()))
 
 def test_subclass():
 
@@ -991,14 +990,14 @@
 
     iarr = YTArray.from_hdf5('test.h5')
 
-    yield assert_equal, warr, iarr
-    yield assert_equal, warr.units.registry['code_length'], iarr.units.registry['code_length']
+    assert_equal(warr, iarr)
+    assert_equal(warr.units.registry['code_length'], iarr.units.registry['code_length'])
 
     warr.write_hdf5('test.h5', dataset_name="test_dset", group_name='/arrays/test_group')
 
     giarr = YTArray.from_hdf5('test.h5', dataset_name="test_dset", group_name='/arrays/test_group')
 
-    yield assert_equal, warr, giarr
+    assert_equal(warr, giarr)
 
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
@@ -1011,83 +1010,82 @@
     # Mass-energy
 
     E = mp.to_equivalent("keV","mass_energy")
-    yield assert_equal, E, mp*clight*clight
-    yield assert_allclose_units, mp, E.to_equivalent("g", "mass_energy")
+    assert_equal(E, mp*clight*clight)
+    assert_allclose_units(mp, E.to_equivalent("g", "mass_energy"))
 
     # Thermal
 
     T = YTQuantity(1.0e8,"K")
     E = T.to_equivalent("W*hr","thermal")
-    yield assert_equal, E, (kboltz*T).in_units("W*hr")
-    yield assert_allclose_units, T, E.to_equivalent("K", "thermal")
+    assert_equal(E, (kboltz*T).in_units("W*hr"))
+    assert_allclose_units(T, E.to_equivalent("K", "thermal"))
 
     # Spectral
 
     l = YTQuantity(4000.,"angstrom")
     nu = l.to_equivalent("Hz","spectral")
-    yield assert_equal, nu, clight/l
+    assert_equal(nu, clight/l)
     E = hcgs*nu
     l2 = E.to_equivalent("angstrom", "spectral")
-    yield assert_allclose_units, l, l2
+    assert_allclose_units(l, l2)
     nu2 = clight/l2.in_units("cm")
-    yield assert_allclose_units, nu, nu2
+    assert_allclose_units(nu, nu2)
     E2 = nu2.to_equivalent("keV", "spectral")
-    yield assert_allclose_units, E2, E.in_units("keV")
+    assert_allclose_units(E2, E.in_units("keV"))
 
     # Sound-speed
 
     mu = 0.6
     gg = 5./3.
     c_s = T.to_equivalent("km/s","sound_speed")
-    yield assert_equal, c_s, np.sqrt(gg*kboltz*T/(mu*mh))
-    yield assert_allclose_units, T, c_s.to_equivalent("K","sound_speed")
+    assert_equal(c_s, np.sqrt(gg*kboltz*T/(mu*mh)))
+    assert_allclose_units(T, c_s.to_equivalent("K","sound_speed"))
 
     mu = 0.5
     gg = 4./3.
     c_s = T.to_equivalent("km/s","sound_speed", mu=mu, gamma=gg)
-    yield assert_equal, c_s, np.sqrt(gg*kboltz*T/(mu*mh))
-    yield assert_allclose_units, T, c_s.to_equivalent("K","sound_speed",
-                                                    mu=mu, gamma=gg)
+    assert_equal(c_s, np.sqrt(gg*kboltz*T/(mu*mh)))
+    assert_allclose_units(T, c_s.to_equivalent("K","sound_speed", mu=mu, gamma=gg))
 
     # Lorentz
 
     v = 0.8*clight
     g = v.to_equivalent("dimensionless","lorentz")
     g2 = YTQuantity(1./np.sqrt(1.-0.8*0.8), "dimensionless")
-    yield assert_allclose_units, g, g2
+    assert_allclose_units(g, g2)
     v2 = g2.to_equivalent("mile/hr", "lorentz")
-    yield assert_allclose_units, v2, v.in_units("mile/hr")
+    assert_allclose_units(v2, v.in_units("mile/hr"))
 
     # Schwarzschild
 
     R = mass_sun_cgs.to_equivalent("kpc","schwarzschild")
-    yield assert_equal, R.in_cgs(), 2*G*mass_sun_cgs/(clight*clight)
-    yield assert_allclose_units, mass_sun_cgs, R.to_equivalent("g", "schwarzschild")
+    assert_equal(R.in_cgs(), 2*G*mass_sun_cgs/(clight*clight))
+    assert_allclose_units(mass_sun_cgs, R.to_equivalent("g", "schwarzschild"))
 
     # Compton
 
     l = me.to_equivalent("angstrom","compton")
-    yield assert_equal, l, hcgs/(me*clight)
-    yield assert_allclose_units, me, l.to_equivalent("g", "compton")
+    assert_equal(l, hcgs/(me*clight))
+    assert_allclose_units(me, l.to_equivalent("g", "compton"))
 
     # Number density
 
     rho = mp/u.cm**3
 
     n = rho.to_equivalent("cm**-3","number_density")
-    yield assert_equal, n, rho/(mh*0.6)
-    yield assert_allclose_units, rho, n.to_equivalent("g/cm**3","number_density")
+    assert_equal(n, rho/(mh*0.6))
+    assert_allclose_units(rho, n.to_equivalent("g/cm**3","number_density"))
 
     n = rho.to_equivalent("cm**-3","number_density", mu=0.75)
-    yield assert_equal, n, rho/(mh*0.75)
-    yield assert_allclose_units, rho, n.to_equivalent("g/cm**3","number_density", mu=0.75)
+    assert_equal(n, rho/(mh*0.75))
+    assert_allclose_units(rho, n.to_equivalent("g/cm**3","number_density", mu=0.75))
 
     # Effective temperature
 
     T = YTQuantity(1.0e4, "K")
     F = T.to_equivalent("erg/s/cm**2","effective_temperature")
-    yield assert_equal, F, stefan_boltzmann_constant_cgs*T**4
-    yield assert_allclose_units, T, F.to_equivalent("K", "effective_temperature")
+    assert_equal(F, stefan_boltzmann_constant_cgs*T**4)
+    assert_allclose_units(T, F.to_equivalent("K", "effective_temperature"))
 
 def test_electromagnetic():
     from yt.units.dimensions import charge_mks, pressure, current_cgs, \
@@ -1098,46 +1096,46 @@
     # Various tests of SI and CGS electromagnetic units
 
     qp_mks = qp.to_equivalent("C", "SI")
-    yield assert_equal, qp_mks.units.dimensions, charge_mks
-    yield assert_array_almost_equal, qp_mks.v, 10.0*qp.v/speed_of_light_cm_per_s
+    assert_equal(qp_mks.units.dimensions, charge_mks)
+    assert_array_almost_equal(qp_mks.v, 10.0*qp.v/speed_of_light_cm_per_s)
 
     qp_cgs = qp_mks.to_equivalent("esu", "CGS")
-    yield assert_array_almost_equal, qp_cgs, qp
-    yield assert_equal, qp_cgs.units.dimensions, qp.units.dimensions
+    assert_array_almost_equal(qp_cgs, qp)
+    assert_equal(qp_cgs.units.dimensions, qp.units.dimensions)
     
     qp_mks_k = qp.to_equivalent("kC", "SI")
-    yield assert_array_almost_equal, qp_mks_k.v, 1.0e-2*qp.v/speed_of_light_cm_per_s
+    assert_array_almost_equal(qp_mks_k.v, 1.0e-2*qp.v/speed_of_light_cm_per_s)
 
     B = YTQuantity(1.0, "T")
     B_cgs = B.to_equivalent("gauss", "CGS")
-    yield assert_equal, B.units.dimensions, magnetic_field_mks
-    yield assert_equal, B_cgs.units.dimensions, magnetic_field_cgs
-    yield assert_array_almost_equal, B_cgs, YTQuantity(1.0e4, "gauss")
+    assert_equal(B.units.dimensions, magnetic_field_mks)
+    assert_equal(B_cgs.units.dimensions, magnetic_field_cgs)
+    assert_array_almost_equal(B_cgs, YTQuantity(1.0e4, "gauss"))
 
     u_mks = B*B/(2*mu_0)
-    yield assert_equal, u_mks.units.dimensions, pressure
+    assert_equal(u_mks.units.dimensions, pressure)
     u_cgs = B_cgs*B_cgs/(8*np.pi)
-    yield assert_equal, u_cgs.units.dimensions, pressure
-    yield assert_array_almost_equal, u_mks.in_cgs(), u_cgs
+    assert_equal(u_cgs.units.dimensions, pressure)
+    assert_array_almost_equal(u_mks.in_cgs(), u_cgs)
     
     I = YTQuantity(1.0, "A")
     I_cgs = I.to_equivalent("statA", "CGS")
-    yield assert_array_almost_equal, I_cgs, YTQuantity(0.1*speed_of_light_cm_per_s, "statA")
-    yield assert_array_almost_equal, I_cgs.to_equivalent("mA", "SI"), I.in_units("mA")
-    yield assert_equal, I_cgs.units.dimensions, current_cgs
+    assert_array_almost_equal(I_cgs, YTQuantity(0.1*speed_of_light_cm_per_s, "statA"))
+    assert_array_almost_equal(I_cgs.to_equivalent("mA", "SI"), I.in_units("mA"))
+    assert_equal(I_cgs.units.dimensions, current_cgs)
     
     R = YTQuantity(1.0, "ohm")
     R_cgs = R.to_equivalent("statohm", "CGS")
     P_mks = I*I*R
     P_cgs = I_cgs*I_cgs*R_cgs
-    yield assert_equal, P_mks.units.dimensions, power
-    yield assert_equal, P_cgs.units.dimensions, power
-    yield assert_array_almost_equal, P_cgs.in_cgs(), P_mks.in_cgs()
-    yield assert_array_almost_equal, P_cgs.in_mks(), YTQuantity(1.0, "W")
+    assert_equal(P_mks.units.dimensions, power)
+    assert_equal(P_cgs.units.dimensions, power)
+    assert_array_almost_equal(P_cgs.in_cgs(), P_mks.in_cgs())
+    assert_array_almost_equal(P_cgs.in_mks(), YTQuantity(1.0, "W"))
     
     V = YTQuantity(1.0, "statV")
     V_mks = V.to_equivalent("V", "SI")
-    yield assert_array_almost_equal, V_mks.v, 1.0e8*V.v/speed_of_light_cm_per_s
+    assert_array_almost_equal(V_mks.v, 1.0e8*V.v/speed_of_light_cm_per_s)
 
 def test_ytarray_coercion():
     a = YTArray([1, 2, 3], 'cm')
@@ -1157,23 +1155,21 @@
     intersect_answer = [2, 3]
     union_answer = [1, 2, 3, 4, 5, 6]
 
-    yield (assert_array_equal, YTArray(catenate_answer, 'cm'),
-           uconcatenate((a1, a2)))
-    yield assert_array_equal, catenate_answer, np.concatenate((a1, a2))
+    assert_array_equal(YTArray(catenate_answer, 'cm'), uconcatenate((a1, a2)))
+    assert_array_equal(catenate_answer, np.concatenate((a1, a2)))
 
-    yield (assert_array_equal, YTArray(intersect_answer, 'cm'),
-           uintersect1d(a1, a2))
-    yield assert_array_equal, intersect_answer, np.intersect1d(a1, a2)
+    assert_array_equal(YTArray(intersect_answer, 'cm'), uintersect1d(a1, a2))
+    assert_array_equal(intersect_answer, np.intersect1d(a1, a2))
 
-    yield assert_array_equal, YTArray(union_answer, 'cm'), uunion1d(a1, a2)
-    yield assert_array_equal, union_answer, np.union1d(a1, a2)
+    assert_array_equal(YTArray(union_answer, 'cm'), uunion1d(a1, a2))
+    assert_array_equal(union_answer, np.union1d(a1, a2))
 
 def test_dimensionless_conversion():
     a = YTQuantity(1, 'Zsun')
     b = a.in_units('Zsun')
     a.convert_to_units('Zsun')
-    yield assert_true, a.units.base_value == metallicity_sun
-    yield assert_true, b.units.base_value == metallicity_sun
+    assert_true(a.units.base_value == metallicity_sun)
+    assert_true(b.units.base_value == metallicity_sun)
 
 def test_modified_unit_division():
     ds1 = fake_random_ds(64)
@@ -1187,9 +1183,9 @@
     b = ds2.quan(3, 'm')
 
     ret = a/b
-    yield assert_true, ret == 0.5
-    yield assert_true, ret.units.is_dimensionless
-    yield assert_true, ret.units.base_value == 1.0
+    assert_true(ret == 0.5)
+    assert_true(ret.units.is_dimensionless)
+    assert_true(ret.units.base_value == 1.0)
 
 def test_load_and_save():
     tmpdir = tempfile.mkdtemp()
@@ -1204,8 +1200,8 @@
 
     d, e = loadtxt("arrays.dat", usecols=(1,2), delimiter=",")
 
-    yield assert_array_equal, b, d
-    yield assert_array_equal, c, e
+    assert_array_equal(b, d)
+    assert_array_equal(c, e)
 
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
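
The hunks above all exercise the same round-trip check: convert a quantity through an equivalence, convert back, and assert the result is unchanged. A minimal standalone sketch of that pattern, assuming a yt version that still provides YTQuantity.to_equivalent as used in this diff:

    from yt.units.yt_array import YTQuantity
    from yt.testing import assert_allclose_units

    l = YTQuantity(4000., "angstrom")
    nu = l.to_equivalent("Hz", "spectral")         # wavelength -> frequency
    l2 = nu.to_equivalent("angstrom", "spectral")  # and back again
    assert_allclose_units(l, l2)                   # round trip is lossless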


https://bitbucket.org/yt_analysis/yt/commits/e39e1d0e7533/
Changeset:   e39e1d0e7533
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 17:54:22+00:00
Summary:     eliminate yield assert from yt.data_objects
Affected #:  19 files
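
The conversion in this changeset is mechanical: nose-style generator tests that yield (callable, args...) tuples become plain functions that call the assertion inline. A hypothetical before/after sketch (test_sum is an illustrative name, not one of the affected tests):

    import numpy as np
    from yt.testing import assert_equal

    # Before: nose collected each yielded tuple as a separate test case.
    def test_sum_old():
        for n in [1, 2, 4]:
            yield assert_equal, np.ones(n).sum(), n

    # After: the same checks run inline in a single test function.
    def test_sum():
        for n in [1, 2, 4]:
            assert_equal(np.ones(n).sum(), n)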

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_chunking.py
--- a/yt/data_objects/tests/test_chunking.py
+++ b/yt/data_objects/tests/test_chunking.py
@@ -34,7 +34,7 @@
                 coords['i'][t] = uconcatenate(coords['i'][t])
                 coords['f'][t].sort()
                 coords['i'][t].sort()
-            yield assert_equal, coords['f']['io'], coords['f']['all']
-            yield assert_equal, coords['f']['io'], coords['f']['spatial']
-            yield assert_equal, coords['i']['io'], coords['i']['all']
-            yield assert_equal, coords['i']['io'], coords['i']['spatial']
+            assert_equal(coords['f']['io'], coords['f']['all'])
+            assert_equal(coords['f']['io'], coords['f']['spatial'])
+            assert_equal(coords['i']['io'], coords['i']['all'])
+            assert_equal(coords['i']['io'], coords['i']['spatial'])

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_compose.py
--- a/yt/data_objects/tests/test_compose.py
+++ b/yt/data_objects/tests/test_compose.py
@@ -56,24 +56,24 @@
         # subselect non-overlapping 0, 1, 2, 3D regions
         for data1 in sources:
             data2 = ds.sphere(center, radius, data_source=data1)
-            yield assert_array_equal, data2['index', 'ID'], empty
+            assert_array_equal(data2['index', 'ID'], empty)
 
             data2 = ds.region(center, left_edge, right_edge, data_source=data1)
-            yield assert_array_equal, data2['index', 'ID'], empty  
+            assert_array_equal(data2['index', 'ID'], empty)
 
             data2 = ds.disk(center, normal, radius, height, data_source=data1)
-            yield assert_array_equal, data2['index', 'ID'], empty
+            assert_array_equal(data2['index', 'ID'], empty)
 
             for d in range(3):
                 data2 = ds.slice(d, center[d], data_source=data1)
-                yield assert_array_equal, data2['index', 'ID'], empty
+                assert_array_equal(data2['index', 'ID'], empty)
 
             for d in range(3):
                 data2 = ds.ortho_ray(d, center[0:d] + center[d+1:], data_source=data1)
-                yield assert_array_equal, data2['index', 'ID'], empty
+                assert_array_equal(data2['index', 'ID'], empty)
 
             data2 = ds.point(center, data_source=data1)
-            yield assert_array_equal, data2['index', 'ID'], empty
+            assert_array_equal(data2['index', 'ID'], empty)
 
 def test_compose_overlap():
     r"""Test to make sure that composed data objects that do
@@ -109,21 +109,21 @@
             id2 = data2['index', 'ID']
             id3 = data3['index', 'ID']
             id3.sort()
-            yield assert_array_equal, uintersect1d(id1, id2), id3
+            assert_array_equal(uintersect1d(id1, id2), id3)
 
             data2 = ds.region(center, left_edge, right_edge)
             data3 = ds.region(center, left_edge, right_edge, data_source=data1)
             id2 = data2['index', 'ID']
             id3 = data3['index', 'ID']
             id3.sort()
-            yield assert_array_equal, uintersect1d(id1, id2), id3
+            assert_array_equal(uintersect1d(id1, id2), id3)
 
             data2 = ds.disk(center, normal, radius, height)
             data3 = ds.disk(center, normal, radius, height, data_source=data1)
             id2 = data2['index', 'ID']
             id3 = data3['index', 'ID']
             id3.sort()
-            yield assert_array_equal, uintersect1d(id1, id2), id3
+            assert_array_equal(uintersect1d(id1, id2), id3)
 
             for d in range(3):
                 data2 = ds.slice(d, center[d])
@@ -131,7 +131,7 @@
                 id2 = data2['index', 'ID']
                 id3 = data3['index', 'ID']
                 id3.sort()
-                yield assert_array_equal, uintersect1d(id1, id2), id3
+                assert_array_equal(uintersect1d(id1, id2), id3)
 
             for d in range(3):
                 data2 = ds.ortho_ray(d, center[0:d] + center[d+1:])
@@ -139,11 +139,11 @@
                 id2 = data2['index', 'ID']
                 id3 = data3['index', 'ID']
                 id3.sort()
-                yield assert_array_equal, uintersect1d(id1, id2), id3
+                assert_array_equal(uintersect1d(id1, id2), id3)
 
             data2 = ds.point(center)
             data3 = ds.point(center, data_source=data1)
             id2 = data2['index', 'ID']
             id3 = data3['index', 'ID']
             id3.sort()
-            yield assert_array_equal, uintersect1d(id1, id2), id3
+            assert_array_equal(uintersect1d(id1, id2), id3)

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -22,42 +22,42 @@
             cg = ds.covering_grid(level, [0.0, 0.0, 0.0],
                     dn * ds.domain_dimensions)
             # Test coordinate generation
-            yield assert_equal, np.unique(cg["dx"]).size, 1
+            assert_equal(np.unique(cg["dx"]).size, 1)
             xmi = cg["x"].min()
             xma = cg["x"].max()
             dx = cg["dx"].flat[0:1]
             edges = ds.arr([[0,1],[0,1],[0,1]], 'code_length')
-            yield assert_equal, xmi, edges[0,0] + dx/2.0
-            yield assert_equal, xmi, cg["x"][0,0,0]
-            yield assert_equal, xmi, cg["x"][0,1,1]
-            yield assert_equal, xma, edges[0,1] - dx/2.0
-            yield assert_equal, xma, cg["x"][-1,0,0]
-            yield assert_equal, xma, cg["x"][-1,1,1]
-            yield assert_equal, np.unique(cg["dy"]).size, 1
+            assert_equal(xmi, edges[0,0] + dx/2.0)
+            assert_equal(xmi, cg["x"][0,0,0])
+            assert_equal(xmi, cg["x"][0,1,1])
+            assert_equal(xma, edges[0,1] - dx/2.0)
+            assert_equal(xma, cg["x"][-1,0,0])
+            assert_equal(xma, cg["x"][-1,1,1])
+            assert_equal(np.unique(cg["dy"]).size, 1)
             ymi = cg["y"].min()
             yma = cg["y"].max()
             dy = cg["dy"][0]
-            yield assert_equal, ymi, edges[1,0] + dy/2.0
-            yield assert_equal, ymi, cg["y"][0,0,0]
-            yield assert_equal, ymi, cg["y"][1,0,1]
-            yield assert_equal, yma, edges[1,1] - dy/2.0
-            yield assert_equal, yma, cg["y"][0,-1,0]
-            yield assert_equal, yma, cg["y"][1,-1,1]
-            yield assert_equal, np.unique(cg["dz"]).size, 1
+            assert_equal(ymi, edges[1,0] + dy/2.0)
+            assert_equal(ymi, cg["y"][0,0,0])
+            assert_equal(ymi, cg["y"][1,0,1])
+            assert_equal(yma, edges[1,1] - dy/2.0)
+            assert_equal(yma, cg["y"][0,-1,0])
+            assert_equal(yma, cg["y"][1,-1,1])
+            assert_equal(np.unique(cg["dz"]).size, 1)
             zmi = cg["z"].min()
             zma = cg["z"].max()
             dz = cg["dz"][0]
-            yield assert_equal, zmi, edges[2,0] + dz/2.0
-            yield assert_equal, zmi, cg["z"][0,0,0]
-            yield assert_equal, zmi, cg["z"][1,1,0]
-            yield assert_equal, zma, edges[2,1] - dz/2.0
-            yield assert_equal, zma, cg["z"][0,0,-1]
-            yield assert_equal, zma, cg["z"][1,1,-1]
+            assert_equal(zmi, edges[2,0] + dz/2.0)
+            assert_equal(zmi, cg["z"][0,0,0])
+            assert_equal(zmi, cg["z"][1,1,0])
+            assert_equal(zma, edges[2,1] - dz/2.0)
+            assert_equal(zma, cg["z"][0,0,-1])
+            assert_equal(zma, cg["z"][1,1,-1])
             # Now we test other attributes
-            yield assert_equal, cg["ones"].max(), 1.0
-            yield assert_equal, cg["ones"].min(), 1.0
-            yield assert_equal, cg["grid_level"], level
-            yield assert_equal, cg["cell_volume"].sum(), ds.domain_width.prod()
+            assert_equal(cg["ones"].max(), 1.0)
+            assert_equal(cg["ones"].min(), 1.0)
+            assert_equal(cg["grid_level"], level)
+            assert_equal(cg["cell_volume"].sum(), ds.domain_width.prod())
             for g in ds.index.grids:
                 di = g.get_global_startindex()
                 dd = g.ActiveDimensions
@@ -65,7 +65,7 @@
                     f = cg["density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
                                       dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
                                       dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
-                    yield assert_equal, f, g["density"]
+                    assert_equal(f, g["density"])
 
 def test_smoothed_covering_grid():
     # We decompose in different ways
@@ -75,9 +75,9 @@
             dn = ds.refine_by**level 
             cg = ds.smoothed_covering_grid(level, [0.0, 0.0, 0.0],
                     dn * ds.domain_dimensions)
-            yield assert_equal, cg["ones"].max(), 1.0
-            yield assert_equal, cg["ones"].min(), 1.0
-            yield assert_equal, cg["cell_volume"].sum(), ds.domain_width.prod()
+            assert_equal(cg["ones"].max(), 1.0)
+            assert_equal(cg["ones"].min(), 1.0)
+            assert_equal(cg["cell_volume"].sum(), ds.domain_width.prod())
             for g in ds.index.grids:
                 if level != g.Level: continue
                 di = g.get_global_startindex()
@@ -86,7 +86,7 @@
                     f = cg["density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
                                       dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
                                       dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
-                    yield assert_equal, f, g["density"]
+                    assert_equal(f, g["density"])
 
 
 def test_arbitrary_grid():
@@ -133,7 +133,7 @@
                     2**ref_level * ds.domain_dimensions)
             ag = ds.arbitrary_grid([0.0, 0.0, 0.0], [1.0, 1.0, 1.0],
                     2**ref_level * ds.domain_dimensions)
-            yield assert_almost_equal, cg["density"], ag["density"]
+            assert_almost_equal(cg["density"], ag["density"])
 
 output_00080 = "output_00080/info_00080.txt"
 @requires_file(output_00080)

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_cutting_plane.py
--- a/yt/data_objects/tests/test_cutting_plane.py
+++ b/yt/data_objects/tests/test_cutting_plane.py
@@ -26,9 +26,9 @@
         center = [0.5, 0.5, 0.5]
         normal = [1, 1, 1]
         cut = ds.cutting(normal, center)
-        yield assert_equal, cut["ones"].sum(), cut["ones"].size
-        yield assert_equal, cut["ones"].min(), 1.0
-        yield assert_equal, cut["ones"].max(), 1.0
+        assert_equal(cut["ones"].sum(), cut["ones"].size)
+        assert_equal(cut["ones"].min(), 1.0)
+        assert_equal(cut["ones"].max(), 1.0)
         pw = cut.to_pw(fields='density')
         for p in pw.plots.values():
             tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
@@ -39,20 +39,14 @@
             frb = cut.to_frb(width, 64)
             for cut_field in ['ones', 'density']:
                 fi = ds._get_field_info("unknown", cut_field)
-                yield assert_equal, frb[cut_field].info['data_source'], \
-                    cut.__str__()
-                yield assert_equal, frb[cut_field].info['axis'], \
-                    4
-                yield assert_equal, frb[cut_field].info['field'], \
-                    cut_field
-                yield assert_equal, frb[cut_field].units, \
-                    Unit(fi.units)
-                yield assert_equal, frb[cut_field].info['xlim'], \
-                    frb.bounds[:2]
-                yield assert_equal, frb[cut_field].info['ylim'], \
-                    frb.bounds[2:]
-                yield assert_equal, frb[cut_field].info['length_to_cm'], \
-                    ds.length_unit.in_cgs()
-                yield assert_equal, frb[cut_field].info['center'], \
-                    cut.center
+                assert_equal(frb[cut_field].info['data_source'],
+                             cut.__str__())
+                assert_equal(frb[cut_field].info['axis'], 4)
+                assert_equal(frb[cut_field].info['field'], cut_field)
+                assert_equal(frb[cut_field].units, Unit(fi.units))
+                assert_equal(frb[cut_field].info['xlim'], frb.bounds[:2])
+                assert_equal(frb[cut_field].info['ylim'], frb.bounds[2:])
+                assert_equal(frb[cut_field].info['length_to_cm'],
+                             ds.length_unit.in_cgs())
+                assert_equal(frb[cut_field].info['center'], cut.center)
     teardown_func(fns)

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_data_collection.py
--- a/yt/data_objects/tests/test_data_collection.py
+++ b/yt/data_objects/tests/test_data_collection.py
@@ -17,14 +17,14 @@
         crho = coll["density"].sum(dtype="float64").to_ndarray()
         grho = np.sum([g["density"].sum(dtype="float64") for g in ds.index.grids],
                       dtype="float64")
-        yield assert_rel_equal, np.array([crho]), np.array([grho]), 12
-        yield assert_equal, coll.size, ds.domain_dimensions.prod()
+        assert_rel_equal(np.array([crho]), np.array([grho]), 12)
+        assert_equal(coll.size, ds.domain_dimensions.prod())
         for gi in range(ds.index.num_grids):
             grids = ds.index.grids[:gi+1]
             coll = ds.data_collection(grids)
             crho = coll["density"].sum(dtype="float64")
             grho = np.sum([g["density"].sum(dtype="float64") for g in grids],
                           dtype="float64")
-            yield assert_rel_equal, np.array([crho]), np.array([grho]), 12
-            yield assert_equal, coll.size, \
-                    sum(g.ActiveDimensions.prod() for g in grids)
+            assert_rel_equal(np.array([crho]), np.array([grho]), 12)
+            assert_equal(coll.size,
+                         sum(g.ActiveDimensions.prod() for g in grids))

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_dataset_access.py
--- a/yt/data_objects/tests/test_dataset_access.py
+++ b/yt/data_objects/tests/test_dataset_access.py
@@ -37,34 +37,34 @@
     # First, no string units
     reg1 = ds.r[0.2:0.3,0.4:0.6,:]
     reg2 = ds.region([0.25, 0.5, 0.5], [0.2, 0.4, 0.0], [0.3, 0.6, 1.0])
-    yield assert_equal, reg1["density"], reg2["density"]
+    assert_equal(reg1["density"], reg2["density"])
 
     # Now, string units in some -- 1.0 == cm
     reg1 = ds.r[(0.1, 'cm'):(0.5, 'cm'), :, (0.25, 'cm'): (0.35, 'cm')]
     reg2 = ds.region([0.3, 0.5, 0.3], [0.1, 0.0, 0.25], [0.5, 1.0, 0.35])
-    yield assert_equal, reg1["density"], reg2["density"]
+    assert_equal(reg1["density"], reg2["density"])
 
     # Now, string units in some -- 1.0 == cm
     reg1 = ds.r[(0.1, 'cm'):(0.5, 'cm'), :, 0.25:0.35]
     reg2 = ds.region([0.3, 0.5, 0.3], [0.1, 0.0, 0.25], [0.5, 1.0, 0.35])
-    yield assert_equal, reg1["density"], reg2["density"]
+    assert_equal(reg1["density"], reg2["density"])
 
     # And, lots of : usage!
     reg1 = ds.r[:, :, :]
     reg2 = ds.all_data()
-    yield assert_equal, reg1["density"], reg2["density"]
+    assert_equal(reg1["density"], reg2["density"])
 
 def test_accessing_all_data():
     # This will test first that we can access all_data, and next that we can
     # access it multiple times and get the *same object*.
     ds = fake_amr_ds(fields=["density"])
     dd = ds.all_data()
-    yield assert_equal, ds.r["density"], dd["density"]
+    assert_equal(ds.r["density"], dd["density"])
     # Now let's assert that it's the same object
     rho = ds.r["density"]
     rho *= 2.0
-    yield assert_equal, dd["density"]*2.0, ds.r["density"]
-    yield assert_equal, dd["gas", "density"]*2.0, ds.r["gas", "density"]
+    assert_equal(dd["density"]*2.0, ds.r["density"])
+    assert_equal(dd["gas", "density"]*2.0, ds.r["gas", "density"])
 
 def test_particle_counts():
     ds = fake_random_ds(16, particles=100)

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_derived_quantities.py
--- a/yt/data_objects/tests/test_derived_quantities.py
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -15,17 +15,17 @@
                 "velocity_x", "velocity_y", "velocity_z"))
         for sp in [ds.sphere("c", (0.25, 'unitary')), ds.r[0.5,:,:]]:
             mi, ma = sp.quantities["Extrema"]("density")
-            yield assert_equal, mi, np.nanmin(sp["density"])
-            yield assert_equal, ma, np.nanmax(sp["density"])
+            assert_equal(mi, np.nanmin(sp["density"]))
+            assert_equal(ma, np.nanmax(sp["density"]))
             dd = ds.all_data()
             mi, ma = dd.quantities["Extrema"]("density")
-            yield assert_equal, mi, np.nanmin(dd["density"])
-            yield assert_equal, ma, np.nanmax(dd["density"])
+            assert_equal(mi, np.nanmin(dd["density"]))
+            assert_equal(ma, np.nanmax(dd["density"]))
             sp = ds.sphere("max", (0.25, 'unitary'))
-            yield assert_equal, np.any(np.isnan(sp["radial_velocity"])), False
+            assert_equal(np.any(np.isnan(sp["radial_velocity"])), False)
             mi, ma = dd.quantities["Extrema"]("radial_velocity")
-            yield assert_equal, mi, np.nanmin(dd["radial_velocity"])
-            yield assert_equal, ma, np.nanmax(dd["radial_velocity"])
+            assert_equal(mi, np.nanmin(dd["radial_velocity"]))
+            assert_equal(ma, np.nanmax(dd["radial_velocity"]))
 
 def test_average():
     for nprocs in [1, 2, 4, 8]:
@@ -33,11 +33,11 @@
         for ad in [ds.all_data(), ds.r[0.5, :, :]]:
         
             my_mean = ad.quantities["WeightedAverageQuantity"]("density", "ones")
-            yield assert_rel_equal, my_mean, ad["density"].mean(), 12
+            assert_rel_equal(my_mean, ad["density"].mean(), 12)
 
             my_mean = ad.quantities["WeightedAverageQuantity"]("density", "cell_mass")
             a_mean = (ad["density"] * ad["cell_mass"]).sum() / ad["cell_mass"].sum()
-            yield assert_rel_equal, my_mean, a_mean, 12
+            assert_rel_equal(my_mean, a_mean, 12)
 
 def test_variance():
     for nprocs in [1, 2, 4, 8]:
@@ -45,15 +45,15 @@
         for ad in [ds.all_data(), ds.r[0.5, :, :]]:
         
             my_std, my_mean = ad.quantities["WeightedVariance"]("density", "ones")
-            yield assert_rel_equal, my_mean, ad["density"].mean(), 12
-            yield assert_rel_equal, my_std, ad["density"].std(), 12
+            assert_rel_equal(my_mean, ad["density"].mean(), 12)
+            assert_rel_equal(my_std, ad["density"].std(), 12)
 
             my_std, my_mean = ad.quantities["WeightedVariance"]("density", "cell_mass")        
             a_mean = (ad["density"] * ad["cell_mass"]).sum() / ad["cell_mass"].sum()
-            yield assert_rel_equal, my_mean, a_mean, 12
+            assert_rel_equal(my_mean, a_mean, 12)
             a_std = np.sqrt((ad["cell_mass"] * (ad["density"] - a_mean)**2).sum() / 
                             ad["cell_mass"].sum())
-            yield assert_rel_equal, my_std, a_std, 12
+            assert_rel_equal(my_std, a_std, 12)
 
 def test_max_location():
     for nprocs in [1, 2, 4, 8]:
@@ -62,13 +62,13 @@
 
             mv, x, y, z = ad.quantities.max_location(("gas", "density"))
 
-            yield assert_equal, mv, ad["density"].max()
+            assert_equal(mv, ad["density"].max())
 
             mi = np.argmax(ad["density"])
 
-            yield assert_equal, ad["x"][mi], x
-            yield assert_equal, ad["y"][mi], y
-            yield assert_equal, ad["z"][mi], z
+            assert_equal(ad["x"][mi], x)
+            assert_equal(ad["y"][mi], y)
+            assert_equal(ad["z"][mi], z)
 
 def test_min_location():
     for nprocs in [1, 2, 4, 8]:
@@ -77,13 +77,13 @@
 
             mv, x, y, z = ad.quantities.min_location(("gas", "density"))
 
-            yield assert_equal, mv, ad["density"].min()
+            assert_equal(mv, ad["density"].min())
 
             mi = np.argmin(ad["density"])
 
-            yield assert_equal, ad["x"][mi], x
-            yield assert_equal, ad["y"][mi], y
-            yield assert_equal, ad["z"][mi], z
+            assert_equal(ad["x"][mi], x)
+            assert_equal(ad["y"][mi], y)
+            assert_equal(ad["z"][mi], z)
 
 def test_sample_at_min_field_values():
     for nprocs in [1, 2, 4, 8]:
@@ -94,12 +94,12 @@
             mv, temp, vm = ad.quantities.sample_at_min_field_values(
                 "density", ["temperature", "velocity_x"])
 
-            yield assert_equal, mv, ad["density"].min()
+            assert_equal(mv, ad["density"].min())
 
             mi = np.argmin(ad["density"])
 
-            yield assert_equal, ad["temperature"][mi], temp
-            yield assert_equal, ad["velocity_x"][mi], vm
+            assert_equal(ad["temperature"][mi], temp)
+            assert_equal(ad["velocity_x"][mi], vm)
 
 def test_sample_at_max_field_values():
     for nprocs in [1, 2, 4, 8]:
@@ -110,13 +110,9 @@
             mv, temp, vm = ad.quantities.sample_at_max_field_values(
                 "density", ["temperature", "velocity_x"])
 
-            yield assert_equal, mv, ad["density"].max()
+            assert_equal(mv, ad["density"].max())
 
             mi = np.argmax(ad["density"])
 
-            yield assert_equal, ad["temperature"][mi], temp
-            yield assert_equal, ad["velocity_x"][mi], vm
-
-if __name__ == "__main__":
-    for i in test_extrema():
-        i[0](*i[1:])
+            assert_equal(ad["temperature"][mi], temp)
+            assert_equal(ad["velocity_x"][mi], vm)

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_ellipsoid.py
--- a/yt/data_objects/tests/test_ellipsoid.py
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -40,7 +40,7 @@
                 e0 = e0s[:,i]
                 tilt = tilts[i]
                 ell = ds.ellipsoid(c, A, B, C, e0, tilt)
-                yield assert_array_less, ell["radius"], A
+                assert_array_less(ell["radius"], A)
                 p = np.array([ell[ax] for ax in 'xyz'])
                 dot_evec = [np.zeros_like(ell["radius"]) for i in range(3)]
                 vecs = [ell._e0, ell._e1, ell._e2]
@@ -55,4 +55,4 @@
                 dist = 0
                 for ax_i in range(3):
                     dist += dot_evec[ax_i]**2.0 / mags[ax_i]**2.0
-                yield assert_array_less, dist, 1.0
+                assert_array_less(dist, 1.0)

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_extract_regions.py
--- a/yt/data_objects/tests/test_extract_regions.py
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -23,17 +23,15 @@
         t = ( (dd["temperature"] > 0.5 ) 
             & (dd["density"] < 0.75 )
             & (dd["velocity_x"] > 0.25 ) )
-        yield assert_equal, np.all(r["temperature"] > 0.5), True
-        yield assert_equal, np.all(r["density"] < 0.75), True
-        yield assert_equal, np.all(r["velocity_x"] > 0.25), True
-        yield assert_equal, np.sort(dd["density"][t]), np.sort(r["density"])
-        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
-        # We are disabling these, as cutting cut regions does not presently
-        # work
-        #r2 = r.cut_region( [ "obj['temperature'] < 0.75" ] )
-        #t2 = (r["temperature"] < 0.75)
-        #yield assert_equal, np.sort(r2["temperature"]), np.sort(r["temperature"][t2])
-        #yield assert_equal, np.all(r2["temperature"] < 0.75), True
+        assert_equal(np.all(r["temperature"] > 0.5), True)
+        assert_equal(np.all(r["density"] < 0.75), True)
+        assert_equal(np.all(r["velocity_x"] > 0.25), True)
+        assert_equal(np.sort(dd["density"][t]), np.sort(r["density"]))
+        assert_equal(np.sort(dd["x"][t]), np.sort(r["x"]))
+        r2 = r.cut_region( [ "obj['temperature'] < 0.75" ] )
+        t2 = (r["temperature"] < 0.75)
+        assert_equal(np.sort(r2["temperature"]), np.sort(r["temperature"][t2]))
+        assert_equal(np.all(r2["temperature"] < 0.75), True)
 
         # Now we can test some projections
         dd = ds.all_data()
@@ -42,9 +40,9 @@
             p1 = ds.proj("density", 0, data_source=dd, weight_field=weight)
             p2 = ds.proj("density", 0, data_source=cr, weight_field=weight)
             for f in p1.field_data:
-                yield assert_almost_equal, p1[f], p2[f]
+                assert_almost_equal(p1[f], p2[f])
         cr = dd.cut_region(["obj['density'] > 0.25"])
         p2 = ds.proj("density", 2, data_source=cr)
-        yield assert_equal, p2["density"].max() > 0.25, True
+        assert_equal(p2["density"].max() > 0.25, True)
         p2 = ds.proj("density", 2, data_source=cr, weight_field = "density")
-        yield assert_equal, p2["density"].max() > 0.25, True
+        assert_equal(p2["density"].max() > 0.25, True)

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_image_array.py
--- a/yt/data_objects/tests/test_image_array.py
+++ b/yt/data_objects/tests/test_image_array.py
@@ -26,14 +26,14 @@
     im_arr = ImageArray(dummy_image(10.0, 4))
 
     new_im = im_arr.rescale(inline=False)
-    yield assert_equal, im_arr[:, :, :3].max(), 2 * 10.
-    yield assert_equal, im_arr[:, :, 3].max(), 3 * 10.
-    yield assert_equal, new_im[:, :, :3].sum(axis=2).max(), 1.0
-    yield assert_equal, new_im[:, :, 3].max(), 1.0
+    assert_equal(im_arr[:, :, :3].max(), 2 * 10.)
+    assert_equal(im_arr[:, :, 3].max(), 3 * 10.)
+    assert_equal(new_im[:, :, :3].sum(axis=2).max(), 1.0)
+    assert_equal(new_im[:, :, 3].max(), 1.0)
 
     im_arr.rescale()
-    yield assert_equal, im_arr[:, :, :3].sum(axis=2).max(), 1.0
-    yield assert_equal, im_arr[:, :, 3].max(), 1.0
+    assert_equal(im_arr[:, :, :3].sum(axis=2).max(), 1.0)
+    assert_equal(im_arr[:, :, 3].max(), 1.0)
 
 
 class TestImageArray(unittest.TestCase):

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_numpy_ops.py
--- a/yt/data_objects/tests/test_numpy_ops.py
+++ b/yt/data_objects/tests/test_numpy_ops.py
@@ -136,18 +136,18 @@
         ad = ds.all_data()
 
         q = ad.argmin("density", axis=["density"])
-        yield assert_equal, q, ad["density"].min()
+        assert_equal(q, ad["density"].min())
 
         q1, q2 = ad.argmin("density", axis=["density", "temperature"])
         mi = np.argmin(ad["density"])
-        yield assert_equal, q1, ad["density"].min()
-        yield assert_equal, q2, ad["temperature"][mi]
+        assert_equal(q1, ad["density"].min())
+        assert_equal(q2, ad["temperature"][mi])
 
         pos = ad.argmin("density")
         mi = np.argmin(ad["density"])
-        yield assert_equal, pos[0], ad["x"][mi]
-        yield assert_equal, pos[1], ad["y"][mi]
-        yield assert_equal, pos[2], ad["z"][mi]
+        assert_equal(pos[0], ad["x"][mi])
+        assert_equal(pos[1], ad["y"][mi])
+        assert_equal(pos[2], ad["z"][mi])
 
 def test_argmax():
     for nprocs in [-1, 1, 2, 16]:
@@ -160,15 +160,15 @@
         ad = ds.all_data()
 
         q = ad.argmax("density", axis=["density"])
-        yield assert_equal, q, ad["density"].max()
+        assert_equal(q, ad["density"].max())
 
         q1, q2 = ad.argmax("density", axis=["density", "temperature"])
         mi = np.argmax(ad["density"])
-        yield assert_equal, q1, ad["density"].max()
-        yield assert_equal, q2, ad["temperature"][mi]
+        assert_equal(q1, ad["density"].max())
+        assert_equal(q2, ad["temperature"][mi])
 
         pos = ad.argmax("density")
         mi = np.argmax(ad["density"])
-        yield assert_equal, pos[0], ad["x"][mi]
-        yield assert_equal, pos[1], ad["y"][mi]
-        yield assert_equal, pos[2], ad["z"][mi]
+        assert_equal(pos[0], ad["x"][mi])
+        assert_equal(pos[1], ad["y"][mi])
+        assert_equal(pos[2], ad["z"][mi])

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_ortho_rays.py
--- a/yt/data_objects/tests/test_ortho_rays.py
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -25,5 +25,5 @@
                    (np.abs(my_all[axes[my_axes[1]]] - ocoord[1]) <= 
                     0.5 * dx[my_axes[1]])
 
-        yield assert_equal, my_oray['density'].sum(), \
-                            my_all['density'][my_cells].sum()
+        assert_equal(my_oray['density'].sum(),
+                     my_all['density'][my_cells].sum())

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_pickle.py
--- a/yt/data_objects/tests/test_pickle.py
+++ b/yt/data_objects/tests/test_pickle.py
@@ -52,9 +52,9 @@
 
     assert_equal.description = \
         "%s: File was pickle-loaded succesfully" % __name__
-    yield assert_equal, test_load is not None, True
+    assert_equal(test_load is not None, True)
     assert_equal.description = \
         "%s: Length of pickle-loaded connected set object" % __name__
-    yield assert_equal, len(contours[1][0]), len(test_load)
+    assert_equal(len(contours[1][0]), len(test_load))
 
     os.remove(cpklfile.name)

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -54,9 +54,8 @@
 
                 for p1d in [direct_profile, indirect_profile_s,
                             indirect_profile_t]:
-                    yield assert_equal, p1d["index", "ones"].sum(), nv
-                    yield assert_rel_equal, tt, \
-                        p1d["gas", "temperature"].sum(), 7
+                    assert_equal(p1d["index", "ones"].sum(), nv)
+                    assert_rel_equal(tt, p1d["gas", "temperature"].sum(), 7)
 
                 p2d = Profile2D(
                     dd,
@@ -64,8 +63,8 @@
                     "temperature", nb, tmi*e1, tma*e2, lf,
                     weight_field=None)
                 p2d.add_fields(["ones", "temperature"])
-                yield assert_equal, p2d["ones"].sum(), nv
-                yield assert_rel_equal, tt, p2d["temperature"].sum(), 7
+                assert_equal(p2d["ones"].sum(), nv)
+                assert_rel_equal(tt, p2d["temperature"].sum(), 7)
 
                 p3d = Profile3D(
                     dd,
@@ -74,39 +73,39 @@
                     "dinosaurs",   nb, dmi*e1, dma*e2, lf,
                     weight_field=None)
                 p3d.add_fields(["ones", "temperature"])
-                yield assert_equal, p3d["ones"].sum(), nv
-                yield assert_rel_equal, tt, p3d["temperature"].sum(), 7
+                assert_equal(p3d["ones"].sum(), nv)
+                assert_rel_equal(tt, p3d["temperature"].sum(), 7)
 
         p1d = Profile1D(dd, "x", nb, 0.0, 1.0, False,
                         weight_field = None)
         p1d.add_fields("ones")
         av = nv / nb
-        yield assert_equal, p1d["ones"], np.ones(nb)*av
+        assert_equal(p1d["ones"], np.ones(nb)*av)
 
         # We re-bin ones with a weight now
         p1d = Profile1D(dd, "x", nb, 0.0, 1.0, False,
                         weight_field = "temperature")
         p1d.add_fields(["ones"])
-        yield assert_equal, p1d["ones"], np.ones(nb)
+        assert_equal(p1d["ones"], np.ones(nb))
 
         # Verify we can access "ones" after adding a new field
         # See issue 988
         p1d.add_fields(["density"])
-        yield assert_equal, p1d["ones"], np.ones(nb)
+        assert_equal(p1d["ones"], np.ones(nb))
 
         p2d = Profile2D(dd, "x", nb, 0.0, 1.0, False,
                             "y", nb, 0.0, 1.0, False,
                             weight_field = None)
         p2d.add_fields("ones")
         av = nv / nb**2
-        yield assert_equal, p2d["ones"], np.ones((nb, nb))*av
+        assert_equal(p2d["ones"], np.ones((nb, nb))*av)
 
         # We re-bin ones with a weight now
         p2d = Profile2D(dd, "x", nb, 0.0, 1.0, False,
                             "y", nb, 0.0, 1.0, False,
                             weight_field = "temperature")
         p2d.add_fields(["ones"])
-        yield assert_equal, p2d["ones"], np.ones((nb, nb))
+        assert_equal(p2d["ones"], np.ones((nb, nb)))
 
         p3d = Profile3D(dd, "x", nb, 0.0, 1.0, False,
                             "y", nb, 0.0, 1.0, False,
@@ -114,7 +113,7 @@
                             weight_field = None)
         p3d.add_fields("ones")
         av = nv / nb**3
-        yield assert_equal, p3d["ones"], np.ones((nb, nb, nb))*av
+        assert_equal(p3d["ones"], np.ones((nb, nb, nb))*av)
 
         # We re-bin ones with a weight now
         p3d = Profile3D(dd, "x", nb, 0.0, 1.0, False,
@@ -122,7 +121,7 @@
                             "z", nb, 0.0, 1.0, False,
                             weight_field = "temperature")
         p3d.add_fields(["ones"])
-        yield assert_equal, p3d["ones"], np.ones((nb,nb,nb))
+        assert_equal(p3d["ones"], np.ones((nb,nb,nb)))
 
 extrema_s = {'particle_position_x': (0, 1)}
 logs_s = {'particle_position_x': False}
@@ -138,32 +137,32 @@
         p1d = Profile1D(dd, "particle_position_x", 128,
                         0.0, 1.0, False, weight_field = None)
         p1d.add_fields(["particle_ones"])
-        yield assert_equal, p1d["particle_ones"].sum(), 32**3
+        assert_equal(p1d["particle_ones"].sum(), 32**3)
 
         p1d = create_profile(dd, ["particle_position_x"], ["particle_ones"],
                              weight_field=None, n_bins=128, extrema=extrema_s,
                              logs=logs_s)
-        yield assert_equal, p1d["particle_ones"].sum(), 32**3
+        assert_equal(p1d["particle_ones"].sum(), 32**3)
 
         p1d = create_profile(dd,
                              [("all", "particle_position_x")],
                              [("all", "particle_ones")],
                              weight_field=None, n_bins=128, extrema=extrema_t,
                              logs=logs_t)
-        yield assert_equal, p1d["particle_ones"].sum(), 32**3
+        assert_equal(p1d["particle_ones"].sum(), 32**3)
 
         p2d = Profile2D(dd, "particle_position_x", 128, 0.0, 1.0, False,
                             "particle_position_y", 128, 0.0, 1.0, False,
                         weight_field = None)
         p2d.add_fields(["particle_ones"])
-        yield assert_equal, p2d["particle_ones"].sum(), 32**3
+        assert_equal(p2d["particle_ones"].sum(), 32**3)
 
         p3d = Profile3D(dd, "particle_position_x", 128, 0.0, 1.0, False,
                             "particle_position_y", 128, 0.0, 1.0, False,
                             "particle_position_z", 128, 0.0, 1.0, False,
                         weight_field = None)
         p3d.add_fields(["particle_ones"])
-        yield assert_equal, p3d["particle_ones"].sum(), 32**3
+        assert_equal(p3d["particle_ones"].sum(), 32**3)
 
 def test_mixed_particle_mesh_profiles():
     ds = fake_random_ds(32, particles=10)

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -44,8 +44,8 @@
         # test if projections inherit the field parameters of their data sources
         dd.set_field_parameter("bulk_velocity", np.array([0,1,2]))
         proj = ds.proj("density", 0, data_source=dd)
-        yield assert_equal, dd.field_parameters["bulk_velocity"], \
-          proj.field_parameters["bulk_velocity"]
+        assert_equal(dd.field_parameters["bulk_velocity"],
+                     proj.field_parameters["bulk_velocity"])
 
         # Some simple projection tests with single grids
         for ax, an in enumerate("xyz"):
@@ -54,17 +54,18 @@
             for wf in ['density', ("gas", "density"), None]:
                 proj = ds.proj(["ones", "density"], ax, weight_field=wf)
                 if wf is None:
-                    yield assert_equal, proj["ones"].sum(), LENGTH_UNIT*proj["ones"].size
-                    yield assert_equal, proj["ones"].min(), LENGTH_UNIT
-                    yield assert_equal, proj["ones"].max(), LENGTH_UNIT
+                    assert_equal(proj["ones"].sum(),
+                                 LENGTH_UNIT*proj["ones"].size)
+                    assert_equal(proj["ones"].min(), LENGTH_UNIT)
+                    assert_equal(proj["ones"].max(), LENGTH_UNIT)
                 else:
-                    yield assert_equal, proj["ones"].sum(), proj["ones"].size
-                    yield assert_equal, proj["ones"].min(), 1.0
-                    yield assert_equal, proj["ones"].max(), 1.0
-                yield assert_equal, np.unique(proj["px"]), uc[xax]
-                yield assert_equal, np.unique(proj["py"]), uc[yax]
-                yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
-                yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+                    assert_equal(proj["ones"].sum(), proj["ones"].size)
+                    assert_equal(proj["ones"].min(), 1.0)
+                    assert_equal(proj["ones"].max(), 1.0)
+                assert_equal(np.unique(proj["px"]), uc[xax])
+                assert_equal(np.unique(proj["py"]), uc[yax])
+                assert_equal(np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0))
+                assert_equal(np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0))
                 plots = [proj.to_pw(fields='density'), proj.to_pw()]
                 for pw in plots:
                     for p in pw.plots.values():
@@ -75,16 +76,15 @@
                 frb = proj.to_frb((1.0, 'unitary'), 64)
                 for proj_field in ['ones', 'density', 'temperature']:
                     fi = ds._get_field_info(proj_field)
-                    yield assert_equal, frb[proj_field].info['data_source'], \
-                        proj.__str__()
-                    yield assert_equal, frb[proj_field].info['axis'], \
-                        ax
-                    yield assert_equal, frb[proj_field].info['field'], \
-                        proj_field
+                    assert_equal(frb[proj_field].info['data_source'],
+                                 proj.__str__())
+                    assert_equal(frb[proj_field].info['axis'], ax)
+                    assert_equal(frb[proj_field].info['field'], proj_field)
                     field_unit = Unit(fi.units)
                     if wf is not None:
-                        yield assert_equal, frb[proj_field].units, \
-                            Unit(field_unit, registry=ds.unit_registry)
+                        assert_equal(
+                            frb[proj_field].units,
+                            Unit(field_unit, registry=ds.unit_registry))
                     else:
                         if frb[proj_field].units.is_code_unit:
                             proj_unit = "code_length"
@@ -93,26 +93,23 @@
                         if field_unit != '' and field_unit != Unit():
                             proj_unit = \
                                 "({0}) * {1}".format(field_unit, proj_unit)
-                        yield assert_equal, frb[proj_field].units, \
-                            Unit(proj_unit, registry=ds.unit_registry)
-                    yield assert_equal, frb[proj_field].info['xlim'], \
-                        frb.bounds[:2]
-                    yield assert_equal, frb[proj_field].info['ylim'], \
-                        frb.bounds[2:]
-                    yield assert_equal, frb[proj_field].info['center'], \
-                        proj.center
+                        assert_equal(
+                            frb[proj_field].units,
+                            Unit(proj_unit, registry=ds.unit_registry))
+                    assert_equal(frb[proj_field].info['xlim'], frb.bounds[:2])
+                    assert_equal(frb[proj_field].info['ylim'], frb.bounds[2:])
+                    assert_equal(frb[proj_field].info['center'], proj.center)
                     if wf is None:
-                        yield assert_equal, \
-                            frb[proj_field].info['weight_field'], wf
+                        assert_equal(frb[proj_field].info['weight_field'], wf)
                     else:
-                        yield assert_equal, \
-                            frb[proj_field].info['weight_field'], \
-                            proj.data_source._determine_fields(wf)[0]
+                        assert_equal(
+                            frb[proj_field].info['weight_field'],
+                            proj.data_source._determine_fields(wf)[0])
             # wf == None
-            yield assert_equal, wf, None
+            assert_equal(wf, None)
             v1 = proj["density"].sum()
             v2 = (LENGTH_UNIT * dd["density"] * dd["d%s" % an]).sum()
-            yield assert_rel_equal, v1, v2, 10
+            assert_rel_equal(v1, v2, 10)
     teardown_func(fns)
 
 

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_rays.py
--- a/yt/data_objects/tests/test_rays.py
+++ b/yt/data_objects/tests/test_rays.py
@@ -30,7 +30,7 @@
             p2 = ds.arr(pp2[:,i] + 1e-8 * np.random.random(3), 'code_length')
 
             my_ray = ds.ray(p1, p2)
-            yield assert_rel_equal, my_ray['dts'].sum(), unitary, 14
+            assert_rel_equal(my_ray['dts'].sum(), unitary, 14)
             ray_cells = my_ray['dts'] > 0
 
             # find cells intersected by the ray
@@ -47,10 +47,10 @@
             tout = tout.min(axis=0)
             my_cells = (tin < tout) & (tin < 1) & (tout > 0)
 
-            yield assert_equal, ray_cells.sum(), my_cells.sum()
-            yield assert_rel_equal, my_ray['density'][ray_cells].sum(), \
-                                    my_all['density'][my_cells].sum(), 14
-            yield assert_rel_equal, my_ray['dts'].sum(), unitary, 14
+            assert_equal(ray_cells.sum(), my_cells.sum())
+            assert_rel_equal(my_ray['density'][ray_cells].sum(),
+                             my_all['density'][my_cells].sum(), 14)
+            assert_rel_equal(my_ray['dts'].sum(), unitary, 14)
 
 @requires_file('GadgetDiskGalaxy/snapshot_200.hdf5')
 def test_ray_in_particle_octree():

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -54,13 +54,13 @@
             for wf in ["density", None]:
                 slc = ds.slice(ax, slc_pos)
                 shifted_slc = ds.slice(ax, slc_pos + grid_eps)
-                yield assert_equal, slc["ones"].sum(), slc["ones"].size
-                yield assert_equal, slc["ones"].min(), 1.0
-                yield assert_equal, slc["ones"].max(), 1.0
-                yield assert_equal, np.unique(slc["px"]), uc[xax]
-                yield assert_equal, np.unique(slc["py"]), uc[yax]
-                yield assert_equal, np.unique(slc["pdx"]), 0.5 / dims[xax]
-                yield assert_equal, np.unique(slc["pdy"]), 0.5 / dims[yax]
+                assert_equal(slc["ones"].sum(), slc["ones"].size)
+                assert_equal(slc["ones"].min(), 1.0)
+                assert_equal(slc["ones"].max(), 1.0)
+                assert_equal(np.unique(slc["px"]), uc[xax])
+                assert_equal(np.unique(slc["py"]), uc[yax])
+                assert_equal(np.unique(slc["pdx"]), 0.5 / dims[xax])
+                assert_equal(np.unique(slc["pdy"]), 0.5 / dims[yax])
                 pw = slc.to_pw(fields='density')
                 for p in pw.plots.values():
                     tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
@@ -72,25 +72,19 @@
                     shifted_frb = shifted_slc.to_frb((1.0, 'unitary'), 64)
                     for slc_field in ['ones', 'density']:
                         fi = ds._get_field_info(slc_field)
-                        yield assert_equal, frb[slc_field].info['data_source'], \
-                            slc.__str__()
-                        yield assert_equal, frb[slc_field].info['axis'], \
-                            ax
-                        yield assert_equal, frb[slc_field].info['field'], \
-                            slc_field
-                        yield assert_equal, frb[slc_field].units, \
-                            Unit(fi.units)
-                        yield assert_equal, frb[slc_field].info['xlim'], \
-                            frb.bounds[:2]
-                        yield assert_equal, frb[slc_field].info['ylim'], \
-                            frb.bounds[2:]
-                        yield assert_equal, frb[slc_field].info['center'], \
-                            slc.center
-                        yield assert_equal, frb[slc_field].info['coord'], \
-                            slc_pos
-                        yield assert_equal, frb[slc_field], \
-                            shifted_frb[slc_field]
-            yield assert_equal, wf, None
+                        assert_equal(frb[slc_field].info['data_source'],
+                                     slc.__str__())
+                        assert_equal(frb[slc_field].info['axis'], ax)
+                        assert_equal(frb[slc_field].info['field'], slc_field)
+                        assert_equal(frb[slc_field].units, Unit(fi.units))
+                        assert_equal(frb[slc_field].info['xlim'],
+                                     frb.bounds[:2])
+                        assert_equal(frb[slc_field].info['ylim'],
+                                     frb.bounds[2:])
+                        assert_equal(frb[slc_field].info['center'], slc.center)
+                        assert_equal(frb[slc_field].info['coord'], slc_pos)
+                        assert_equal(frb[slc_field], shifted_frb[slc_field])
+            assert_equal(wf, None)
     teardown_func(fns)
 
 
@@ -106,4 +100,4 @@
     ds = fake_random_ds(64, nprocs=8, fields=["density"], negative=[False])
     slc = ds.slice(2, 1.0)
     slc["density"]
-    yield assert_equal, slc["density"].size, 0
+    assert_equal(slc["density"].size, 0)

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_spheres.py
--- a/yt/data_objects/tests/test_spheres.py
+++ b/yt/data_objects/tests/test_spheres.py
@@ -32,8 +32,8 @@
     # Set the bulk velocity field parameter
     sp1.set_field_parameter("bulk_velocity", bulk_vel)
 
-    yield assert_equal, np.any(sp0["radial_velocity"] ==
-                               sp1["radial_velocity"]), False
+    assert_equal(np.any(sp0["radial_velocity"] == sp1["radial_velocity"]),
+                 False)
 
     # Radial profile without correction
     # Note we set n_bins = 8 here.
@@ -50,12 +50,12 @@
                          logs = {'radius': False},
                          n_bins = 8)
 
-    yield assert_equal, rp0.x_bins, rp1.x_bins
-    yield assert_equal, rp0.used, rp1.used
-    yield assert_equal, rp0.used.sum() > rp0.used.size/2.0, True
-    yield assert_equal, np.any(rp0["radial_velocity"][rp0.used] ==
-                               rp1["radial_velocity"][rp1.used]), \
-                               False
+    assert_equal(rp0.x_bins, rp1.x_bins)
+    assert_equal(rp0.used, rp1.used)
+    assert_equal(rp0.used.sum() > rp0.used.size/2.0, True)
+    assert_equal(np.any(rp0["radial_velocity"][rp0.used] ==
+                        rp1["radial_velocity"][rp1.used]),
+                 False)
 
     ref_sp = ds.sphere("c", 0.25)
     for f in _fields_to_compare:
@@ -64,4 +64,4 @@
         sp = ds.sphere(center, 0.25)
         for f in _fields_to_compare:
             sp[f].sort()
-            yield assert_equal, sp[f], ref_sp[f]
+            assert_equal(sp[f], ref_sp[f])

diff -r 5e360caa60a5f430538bc66939788a1c1e58682b -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 yt/data_objects/tests/test_streamlines.py
--- a/yt/data_objects/tests/test_streamlines.py
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -23,6 +23,6 @@
         streams = Streamlines(ds, cs, length=length)
         streams.integrate_through_volume()
         for path in (streams.path(i) for i in range(8)):
-            yield assert_rel_equal, path['dts'].sum(), 1.0, 14
-            yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True
+            assert_rel_equal(path['dts'].sum(), 1.0, 14)
+            assert_equal(np.all(path['t'] <= (1.0 + 1e-10)), True)
             path["density"]


https://bitbucket.org/yt_analysis/yt/commits/2c35a84ba0ac/
Changeset:   2c35a84ba0ac
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 18:29:53+00:00
Summary:     Make the data object tests run without raising runtime warnings
Affected #:  2 files
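
Both hunks below target warnings: test_compose.py passes an explicit sampling_type='cell' to ds.add_field, and math_utils.py computes arccos inside np.errstate and zeroes the resulting NaNs so NumPy's "invalid value" RuntimeWarning never fires. A minimal standalone sketch of the errstate pattern (safe_arccos is an illustrative name, not yt's API):

    import numpy as np

    def safe_arccos(x):
        # Out-of-domain arguments (e.g. from a 0/0 at the coordinate
        # origin) would otherwise warn "invalid value encountered".
        with np.errstate(invalid='ignore'):
            ret = np.arccos(x)
        ret[np.isnan(ret)] = 0  # as in the diff: NaN -> 0
        return ret

    safe_arccos(np.array([1.0, 0.0, 2.0]))  # -> [0., pi/2, 0.]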

diff -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 -r 2c35a84ba0ac19c0293e17d4b1df40aa51b40cd1 yt/data_objects/tests/test_compose.py
--- a/yt/data_objects/tests/test_compose.py
+++ b/yt/data_objects/tests/test_compose.py
@@ -34,7 +34,7 @@
     empty = np.array([])
     for n in [1, 2, 4, 8]:
         ds = fake_random_ds(64, nprocs=n)
-        ds.add_field(("index", "ID"), function=_IDFIELD)
+        ds.add_field(("index", "ID"), sampling_type='cell', function=_IDFIELD)
 
         # position parameters for initial region
         center = [0.25]*3
@@ -81,7 +81,7 @@
     """
     for n in [1, 2, 4, 8]:
         ds = fake_random_ds(64, nprocs=n)
-        ds.add_field(("index", "ID"), function=_IDFIELD)
+        ds.add_field(("index", "ID"), sampling_type='cell', function=_IDFIELD)
 
         # position parameters for initial region
         center = [0.4, 0.5, 0.5]

diff -r e39e1d0e75330ed1afeadf65abdc2f38310791a1 -r 2c35a84ba0ac19c0293e17d4b1df40aa51b40cd1 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -1234,7 +1234,12 @@
 
     JdotCoords = np.sum(J*coords,axis=0)
 
-    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=0)) )
+    with np.errstate(invalid='ignore'):
+        ret = np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=0)))
+
+    ret[np.isnan(ret)] = 0
+
+    return ret
 
 def get_sph_phi(coords, normal):
     # We have freedom with respect to what axis (xprime) to define


https://bitbucket.org/yt_analysis/yt/commits/cb3e8d445eed/
Changeset:   cb3e8d445eed
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 18:57:44+00:00
Summary:     eliminate yield assert from yt.visualization tests
Affected #:  6 files
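
Yielded exception checks convert the same way: the (assert_raises, ExceptionType, callable, args...) tuple becomes a direct call, and assert_raises still invokes the callable with the trailing arguments. A self-contained sketch (the exception class and save_plot are stand-ins, not yt's API):

    from numpy.testing import assert_raises

    class YTDataTypeUnsupported(Exception):
        """Stand-in for yt's exception, kept local so the sketch runs."""

    def save_plot(prefix):
        raise YTDataTypeUnsupported(prefix)

    # Before: yield assert_raises, YTDataTypeUnsupported, save_plot, "tmp"
    # After: assert_raises calls save_plot("tmp") and passes if it raises.
    assert_raises(YTDataTypeUnsupported, save_plot, "tmp")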

diff -r 2c35a84ba0ac19c0293e17d4b1df40aa51b40cd1 -r cb3e8d445eedf763d52756cd074856d4913aad3e yt/visualization/tests/test_callbacks.py
--- a/yt/visualization/tests/test_callbacks.py
+++ b/yt/visualization/tests/test_callbacks.py
@@ -78,13 +78,13 @@
         ds = fake_amr_ds(fields = ("density",))
         p = ProjectionPlot(ds, ax, "density")
         p.annotate_timestamp()
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         p = SlicePlot(ds, ax, "density")
         p.annotate_timestamp()
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         p = OffAxisSlicePlot(ds, vector, "density")
         p.annotate_timestamp()
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         # Now we'll check a few additional minor things
         p = SlicePlot(ds, "x", "density")
         p.annotate_timestamp(corner='lower_right', redshift=True,
@@ -95,10 +95,10 @@
         ds = fake_amr_ds(fields = ("density",), geometry="spherical")
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_timestamp(coord_system="data")
-        yield assert_raises, YTDataTypeUnsupported, p.save, prefix
+        assert_raises(YTDataTypeUnsupported, p.save, prefix)
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_timestamp(coord_system="axis")
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
 
 def test_scale_callback():
     with _cleanup_fname() as prefix:
@@ -147,13 +147,13 @@
         ds = fake_amr_ds(fields = ("density",))
         p = ProjectionPlot(ds, ax, "density")
         p.annotate_line([0.1,0.1,0.1],[0.5,0.5,0.5])
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         p = SlicePlot(ds, ax, "density")
         p.annotate_line([0.1,0.1,0.1],[0.5,0.5,0.5])
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         p = OffAxisSlicePlot(ds, vector, "density")
         p.annotate_line([0.1,0.1,0.1],[0.5,0.5,0.5])
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         # Now we'll check a few additional minor things
         p = SlicePlot(ds, "x", "density")
         p.annotate_line([0.1,0.1],[0.5,0.5], coord_system='axis',
@@ -164,10 +164,10 @@
         ds = fake_amr_ds(fields = ("density",), geometry="spherical")
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_line([0.1,0.1,0.1],[0.5,0.5,0.5])
-        yield assert_raises, YTDataTypeUnsupported, p.save, prefix
+        assert_raises(YTDataTypeUnsupported, p.save, prefix)
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_line([0.1,0.1],[0.5,0.5], coord_system="axis")
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
 
 def test_ray_callback():
     with _cleanup_fname() as prefix:
@@ -179,15 +179,15 @@
         p = ProjectionPlot(ds, ax, "density")
         p.annotate_ray(oray)
         p.annotate_ray(ray)
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         p = SlicePlot(ds, ax, "density")
         p.annotate_ray(oray)
         p.annotate_ray(ray)
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         p = OffAxisSlicePlot(ds, vector, "density")
         p.annotate_ray(oray)
         p.annotate_ray(ray)
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         # Now we'll check a few additional minor things
         p = SlicePlot(ds, "x", "density")
         p.annotate_ray(oray)
@@ -200,10 +200,10 @@
         oray = ds.ortho_ray(0, (0.3, 0.4))
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_ray(oray)
-        yield assert_raises, YTDataTypeUnsupported, p.save, prefix
+        assert_raises(YTDataTypeUnsupported, p.save, prefix)
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_ray(ray)
-        yield assert_raises, YTDataTypeUnsupported, p.save, prefix
+        assert_raises(YTDataTypeUnsupported, p.save, prefix)
 
 def test_arrow_callback():
     with _cleanup_fname() as prefix:
@@ -212,13 +212,13 @@
         ds = fake_amr_ds(fields = ("density",))
         p = ProjectionPlot(ds, ax, "density")
         p.annotate_arrow([0.5,0.5,0.5])
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         p = SlicePlot(ds, ax, "density")
         p.annotate_arrow([0.5,0.5,0.5])
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         p = OffAxisSlicePlot(ds, vector, "density")
         p.annotate_arrow([0.5,0.5,0.5])
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         # Now we'll check a few additional minor things
         p = SlicePlot(ds, "x", "density")
         p.annotate_arrow([0.5,0.5], coord_system='axis', length=0.05)
@@ -228,10 +228,10 @@
         ds = fake_amr_ds(fields = ("density",), geometry="spherical")
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_arrow([0.5,0.5,0.5])
-        yield assert_raises, YTDataTypeUnsupported, p.save, prefix
+        assert_raises(YTDataTypeUnsupported, p.save, prefix)
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_arrow([0.5,0.5], coord_system="axis")
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
 
 def test_marker_callback():
     with _cleanup_fname() as prefix:
@@ -240,13 +240,13 @@
         ds = fake_amr_ds(fields = ("density",))
         p = ProjectionPlot(ds, ax, "density")
         p.annotate_marker([0.5,0.5,0.5])
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         p = SlicePlot(ds, ax, "density")
         p.annotate_marker([0.5,0.5,0.5])
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         p = OffAxisSlicePlot(ds, vector, "density")
         p.annotate_marker([0.5,0.5,0.5])
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         # Now we'll check a few additional minor things
         p = SlicePlot(ds, "x", "density")
         p.annotate_marker([0.5,0.5], coord_system='axis', marker='*')
@@ -256,21 +256,21 @@
         ds = fake_amr_ds(fields = ("density",), geometry="spherical")
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_marker([0.5,0.5,0.5])
-        yield assert_raises, YTDataTypeUnsupported, p.save, prefix
+        assert_raises(YTDataTypeUnsupported, p.save, prefix)
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_marker([0.5,0.5], coord_system="axis")
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
 
 def test_particles_callback():
     with _cleanup_fname() as prefix:
         ax = 'z'
-        ds = fake_amr_ds(fields=("density",), particles=True)
+        ds = fake_amr_ds(fields=("density",), particles=1)
         p = ProjectionPlot(ds, ax, "density")
         p.annotate_particles((10, "Mpc"))
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         p = SlicePlot(ds, ax, "density")
         p.annotate_particles((10, "Mpc"))
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         # Now we'll check a few additional minor things
         p = SlicePlot(ds, "x", "density")
         p.annotate_particles((10, "Mpc"), p_size=1.0, col="k", marker="o",
@@ -282,7 +282,7 @@
         ds = fake_amr_ds(fields=("density",), geometry="spherical")
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_particles((10, "Mpc"))
-        yield assert_raises, YTDataTypeUnsupported, p.save, prefix
+        assert_raises(YTDataTypeUnsupported, p.save, prefix)
 
 def test_sphere_callback():
     with _cleanup_fname() as prefix:
@@ -291,13 +291,13 @@
         ds = fake_amr_ds(fields = ("density",))
         p = ProjectionPlot(ds, ax, "density")
         p.annotate_sphere([0.5,0.5,0.5], 0.1)
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         p = SlicePlot(ds, ax, "density")
         p.annotate_sphere([0.5,0.5,0.5], 0.1)
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         p = OffAxisSlicePlot(ds, vector, "density")
         p.annotate_sphere([0.5,0.5,0.5], 0.1)
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         # Now we'll check a few additional minor things
         p = SlicePlot(ds, "x", "density")
         p.annotate_sphere([0.5,0.5], 0.1, coord_system='axis', text='blah')
@@ -307,10 +307,10 @@
         ds = fake_amr_ds(fields = ("density",), geometry="spherical")
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_sphere([0.5,0.5,0.5], 0.1)
-        yield assert_raises, YTDataTypeUnsupported, p.save, prefix
+        assert_raises(YTDataTypeUnsupported, p.save, prefix)
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_sphere([0.5,0.5], 0.1, coord_system='axis', text='blah')
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
 
 def test_text_callback():
     with _cleanup_fname() as prefix:
@@ -319,13 +319,13 @@
         ds = fake_amr_ds(fields = ("density",))
         p = ProjectionPlot(ds, ax, "density")
         p.annotate_text([0.5,0.5,0.5], 'dinosaurs!')
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         p = SlicePlot(ds, ax, "density")
         p.annotate_text([0.5,0.5,0.5], 'dinosaurs!')
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         p = OffAxisSlicePlot(ds, vector, "density")
         p.annotate_text([0.5,0.5,0.5], 'dinosaurs!')
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         # Now we'll check a few additional minor things
         p = SlicePlot(ds, "x", "density")
         p.annotate_text([0.5,0.5], 'dinosaurs!', coord_system='axis',
@@ -336,11 +336,11 @@
         ds = fake_amr_ds(fields = ("density",), geometry="spherical")
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_text([0.5,0.5,0.5], 'dinosaurs!')
-        yield assert_raises, YTDataTypeUnsupported, p.save, prefix
+        assert_raises(YTDataTypeUnsupported, p.save, prefix)
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_text([0.5,0.5], 'dinosaurs!', coord_system='axis',
                         text_args={'color':'red'})
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
 
 def test_velocity_callback():
     with _cleanup_fname() as prefix:
@@ -349,19 +349,19 @@
         for ax in 'xyz':
             p = ProjectionPlot(ds, ax, "density", weight_field="density")
             p.annotate_velocity()
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
             p = SlicePlot(ds, ax, "density")
             p.annotate_velocity()
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
         # Test for OffAxis Slice
         p = SlicePlot(ds, [1, 1, 0], 'density', north_vector=[0, 0, 1])
         p.annotate_velocity(factor=40, normalize=True)
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         # Now we'll check a few additional minor things
         p = SlicePlot(ds, "x", "density")
         p.annotate_velocity(factor=8, scale=0.5, scale_units="inches",
                             normalize = True)
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
 
     with _cleanup_fname() as prefix:
         ds = fake_amr_ds(fields = 
@@ -369,7 +369,7 @@
             geometry="spherical")
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_velocity(factor=40, normalize=True)
-        yield assert_raises, YTDataTypeUnsupported, p.save, prefix
+        assert_raises(YTDataTypeUnsupported, p.save, prefix)
 
 def test_magnetic_callback():
     with _cleanup_fname() as prefix:
@@ -378,19 +378,19 @@
         for ax in 'xyz':
             p = ProjectionPlot(ds, ax, "density", weight_field="density")
             p.annotate_magnetic_field()
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
             p = SlicePlot(ds, ax, "density")
             p.annotate_magnetic_field()
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
         # Test for OffAxis Slice
         p = SlicePlot(ds, [1, 1, 0], 'density', north_vector=[0, 0, 1])
         p.annotate_magnetic_field(factor=40, normalize=True)
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
         # Now we'll check a few additional minor things
         p = SlicePlot(ds, "x", "density")
         p.annotate_magnetic_field(factor=8, scale=0.5,
             scale_units="inches", normalize = True)
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
 
     with _cleanup_fname() as prefix:
         ds = fake_amr_ds(fields = ("density", "magnetic_field_r",
@@ -399,7 +399,7 @@
         p = ProjectionPlot(ds, "r", "density")
         p.annotate_magnetic_field(factor=8, scale=0.5,
             scale_units="inches", normalize = True)
-        yield assert_raises, YTDataTypeUnsupported, p.save, prefix
+        assert_raises(YTDataTypeUnsupported, p.save, prefix)
 
 def test_quiver_callback():
     with _cleanup_fname() as prefix:
@@ -408,20 +408,20 @@
         for ax in 'xyz':
             p = ProjectionPlot(ds, ax, "density")
             p.annotate_quiver("velocity_x", "velocity_y")
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
             p = ProjectionPlot(ds, ax, "density", weight_field="density")
             p.annotate_quiver("velocity_x", "velocity_y")
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
             p = SlicePlot(ds, ax, "density")
             p.annotate_quiver("velocity_x", "velocity_y")
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
         # Now we'll check a few additional minor things
         p = SlicePlot(ds, "x", "density")
         p.annotate_quiver("velocity_x", "velocity_y", factor=8, scale=0.5,
             scale_units="inches", normalize = True,
             bv_x = 0.5 * u.cm / u.s,
             bv_y = 0.5 * u.cm / u.s)
-        yield assert_fname, p.save(prefix)[0]
+        assert_fname(p.save(prefix)[0])
 
     with _cleanup_fname() as prefix:
         ds = fake_amr_ds(fields = 
@@ -432,7 +432,7 @@
             scale_units="inches", normalize = True,
             bv_x = 0.5 * u.cm / u.s,
             bv_y = 0.5 * u.cm / u.s)
-        yield assert_raises, YTDataTypeUnsupported, p.save, prefix
+        assert_raises(YTDataTypeUnsupported, p.save, prefix)
 
 def test_contour_callback():
     with _cleanup_fname() as prefix:
@@ -440,13 +440,13 @@
         for ax in 'xyz':
             p = ProjectionPlot(ds, ax, "density")
             p.annotate_contour("temperature")
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
             p = ProjectionPlot(ds, ax, "density", weight_field="density")
             p.annotate_contour("temperature")
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
             p = SlicePlot(ds, ax, "density")
             p.annotate_contour("temperature") # BREAKS WITH ndarray
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
         # Now we'll check a few additional minor things
         p = SlicePlot(ds, "x", "density")
         p.annotate_contour("temperature", ncont=10, factor=8,
@@ -472,7 +472,7 @@
             take_log=False, clim=(0.4, 0.6),
             plot_args={'lw':2.0}, label=True,
             text_args={'text-size':'x-large'})
-        yield assert_raises, YTDataTypeUnsupported, p.save, prefix
+        assert_raises(YTDataTypeUnsupported, p.save, prefix)
 
 
 def test_grids_callback():
@@ -481,13 +481,13 @@
         for ax in 'xyz':
             p = ProjectionPlot(ds, ax, "density")
             p.annotate_grids()
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
             p = ProjectionPlot(ds, ax, "density", weight_field="density")
             p.annotate_grids()
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
             p = SlicePlot(ds, ax, "density")
             p.annotate_grids()
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
         # Now we'll check a few additional minor things
         p = SlicePlot(ds, "x", "density")
         p.annotate_grids(alpha=0.7, min_pix=10, min_pix_ids=30,
@@ -501,7 +501,7 @@
         p.annotate_grids(alpha=0.7, min_pix=10, min_pix_ids=30,
             draw_ids=True, periodic=False, min_level=2,
             max_level=3, cmap="gist_stern")
-        yield assert_raises, YTDataTypeUnsupported, p.save, prefix
+        assert_raises(YTDataTypeUnsupported, p.save, prefix)
 
 
 def test_cell_edges_callback():
@@ -510,13 +510,13 @@
         for ax in 'xyz':
             p = ProjectionPlot(ds, ax, "density")
             p.annotate_cell_edges()
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
             p = ProjectionPlot(ds, ax, "density", weight_field="density")
             p.annotate_cell_edges()
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
             p = SlicePlot(ds, ax, "density")
             p.annotate_cell_edges()
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
         # Now we'll check a few additional minor things
         p = SlicePlot(ds, "x", "density")
         p.annotate_cell_edges(alpha=0.7, line_width=0.9,
@@ -527,7 +527,7 @@
         ds = fake_amr_ds(fields = ("density",), geometry="spherical")
         p = SlicePlot(ds, "r", "density")
         p.annotate_cell_edges()
-        yield assert_raises, YTDataTypeUnsupported, p.save, prefix
+        assert_raises(YTDataTypeUnsupported, p.save, prefix)
 
 def test_mesh_lines_callback():
     with _cleanup_fname() as prefix:
@@ -536,13 +536,13 @@
         for field in ds.field_list:
             sl = SlicePlot(ds, 1, field)
             sl.annotate_mesh_lines(plot_args={'color':'black'})
-            yield assert_fname, sl.save(prefix)[0]
+            assert_fname(sl.save(prefix)[0])
 
         ds = fake_tetrahedral_ds()
         for field in ds.field_list:
             sl = SlicePlot(ds, 1, field)
             sl.annotate_mesh_lines(plot_args={'color':'black'})
-            yield assert_fname, sl.save(prefix)[0]
+            assert_fname(sl.save(prefix)[0])
                 
 
 def test_line_integral_convolution_callback():
@@ -552,13 +552,13 @@
         for ax in 'xyz':
             p = ProjectionPlot(ds, ax, "density")
             p.annotate_line_integral_convolution("velocity_x", "velocity_y")
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
             p = ProjectionPlot(ds, ax, "density", weight_field="density")
             p.annotate_line_integral_convolution("velocity_x", "velocity_y")
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
             p = SlicePlot(ds, ax, "density")
             p.annotate_line_integral_convolution("velocity_x", "velocity_y")
-            yield assert_fname, p.save(prefix)[0]
+            assert_fname(p.save(prefix)[0])
         # Now we'll check a few additional minor things
         p = SlicePlot(ds, "x", "density")
         p.annotate_line_integral_convolution("velocity_x", "velocity_y",
@@ -573,4 +573,4 @@
             geometry="spherical")
         p = SlicePlot(ds, "r", "density")
         p.annotate_line_integral_convolution("velocity_theta", "velocity_phi")
-        yield assert_raises, YTDataTypeUnsupported, p.save, prefix
+        assert_raises(YTDataTypeUnsupported, p.save, prefix)
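
The exception checks in test_callbacks.py convert the same way: the yielded (assert_raises, exc, func, args...) tuple becomes a direct call. A minimal illustration with a toy callable:

    from numpy.testing import assert_raises

    def save_unsupported():
        raise ValueError("unsupported geometry")

    # passes if and only if the callable raises the named exception
    assert_raises(ValueError, save_unsupported)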

diff -r 2c35a84ba0ac19c0293e17d4b1df40aa51b40cd1 -r cb3e8d445eedf763d52756cd074856d4913aad3e yt/visualization/tests/test_export_frb.py
--- a/yt/visualization/tests/test_export_frb.py
+++ b/yt/visualization/tests/test_export_frb.py
@@ -31,10 +31,10 @@
     frb_ds = frb.export_dataset(fields=["density"], nprocs=8)
     dd_frb = frb_ds.all_data()
 
-    yield assert_equal, frb_ds.domain_left_edge.v, np.array([0.25,0.25,0.0])
-    yield assert_equal, frb_ds.domain_right_edge.v, np.array([0.75,0.75,1.0])
-    yield assert_equal, frb_ds.domain_width.v, np.array([0.5,0.5,1.0])
-    yield assert_equal, frb_ds.domain_dimensions, np.array([64,64,1], dtype="int64")
-    yield assert_allclose_units, frb["density"].sum(), \
-        dd_frb.quantities.total_quantity("density")
-    yield assert_equal, frb_ds.index.num_grids, 8
+    assert_equal(frb_ds.domain_left_edge.v, np.array([0.25,0.25,0.0]))
+    assert_equal(frb_ds.domain_right_edge.v, np.array([0.75,0.75,1.0]))
+    assert_equal(frb_ds.domain_width.v, np.array([0.5,0.5,1.0]))
+    assert_equal(frb_ds.domain_dimensions, np.array([64,64,1], dtype="int64"))
+    assert_allclose_units(frb["density"].sum(),
+                          dd_frb.quantities.total_quantity("density"))
+    assert_equal(frb_ds.index.num_grids, 8)

diff -r 2c35a84ba0ac19c0293e17d4b1df40aa51b40cd1 -r cb3e8d445eedf763d52756cd074856d4913aad3e yt/visualization/tests/test_fits_image.py
--- a/yt/visualization/tests/test_fits_image.py
+++ b/yt/visualization/tests/test_fits_image.py
@@ -47,14 +47,14 @@
     fits_prj = FITSProjection(ds, "z", ["density","temperature"], image_res=128,
                               width=(0.5,"unitary"))
 
-    yield assert_equal, fid1.get_data("density"), fits_prj.get_data("density")
-    yield assert_equal, fid1.get_data("temperature"), fits_prj.get_data("temperature")
+    assert_equal(fid1.get_data("density"), fits_prj.get_data("density"))
+    assert_equal(fid1.get_data("temperature"), fits_prj.get_data("temperature"))
 
     fid1.writeto("fid1.fits", clobber=True)
     new_fid1 = FITSImageData.from_file("fid1.fits")
 
-    yield assert_equal, fid1.get_data("density"), new_fid1.get_data("density")
-    yield assert_equal, fid1.get_data("temperature"), new_fid1.get_data("temperature")
+    assert_equal(fid1.get_data("density"), new_fid1.get_data("density"))
+    assert_equal(fid1.get_data("temperature"), new_fid1.get_data("temperature"))
 
     ds2 = load("fid1.fits")
     ds2.index
@@ -74,8 +74,8 @@
     fits_slc = FITSSlice(ds, "z", ["density","temperature"], image_res=128,
                          width=(0.5,"unitary"))
 
-    yield assert_equal, fid2.get_data("density"), fits_slc.get_data("density")
-    yield assert_equal, fid2.get_data("temperature"), fits_slc.get_data("temperature")
+    assert_equal(fid2.get_data("density"), fits_slc.get_data("density"))
+    assert_equal(fid2.get_data("temperature"), fits_slc.get_data("temperature"))
 
     dens_img = fid2.pop("density")
     temp_img = fid2.pop("temperature")
@@ -92,8 +92,8 @@
                                 image_res=128, center=[0.5, 0.42, 0.6],
                                 width=(0.5,"unitary"))
 
-    yield assert_equal, fid3.get_data("density"), fits_cut.get_data("density")
-    yield assert_equal, fid3.get_data("temperature"), fits_cut.get_data("temperature")
+    assert_equal(fid3.get_data("density"), fits_cut.get_data("density"))
+    assert_equal(fid3.get_data("temperature"), fits_cut.get_data("temperature"))
 
     fid3.create_sky_wcs([30.,45.], (1.0,"arcsec/kpc"))
     fid3.writeto("fid3.fits", clobber=True)
@@ -111,7 +111,7 @@
                                      width=(0.5,"unitary"), image_res=128,
                                      depth_res=128, depth=(0.5,"unitary"))
 
-    yield assert_equal, fid4.get_data("density"), fits_oap.get_data("density")
+    assert_equal(fid4.get_data("density"), fits_oap.get_data("density"))
 
     fid4.create_sky_wcs([30., 45.], (1.0, "arcsec/kpc"), replace_old_wcs=False)
     assert fid4.wcs.wcs.cunit[0] == "cm"

diff -r 2c35a84ba0ac19c0293e17d4b1df40aa51b40cd1 -r cb3e8d445eedf763d52756cd074856d4913aad3e yt/visualization/tests/test_offaxisprojection.py
--- a/yt/visualization/tests/test_offaxisprojection.py
+++ b/yt/visualization/tests/test_offaxisprojection.py
@@ -75,7 +75,7 @@
         image = off_axis_projection(*oap_args, **oap_kwargs)
         for wp_kwargs in wp_kwargs_list:
             write_projection(image, fn % i, **wp_kwargs)
-            yield assert_equal, os.path.exists(fn % i), True
+            assert_equal(os.path.exists(fn % i), True)
 
     if tmpdir:
         os.chdir(curdir)

diff -r 2c35a84ba0ac19c0293e17d4b1df40aa51b40cd1 -r cb3e8d445eedf763d52756cd074856d4913aad3e yt/visualization/tests/test_profile_plots.py
--- a/yt/visualization/tests/test_profile_plots.py
+++ b/yt/visualization/tests/test_profile_plots.py
@@ -133,12 +133,12 @@
     @parameterized.expand(param.explicit((fname, )) for fname in TEST_FLNMS)
     def test_profile_plot(self, fname):
         for p in self.profiles:
-            yield assert_fname(p.save(fname)[0])
+            assert_fname(p.save(fname)[0])
 
     @parameterized.expand(param.explicit((fname, )) for fname in TEST_FLNMS)
     def test_phase_plot(self, fname):
         for p in self.phases:
-            assert assert_fname(p.save(fname)[0])
+            assert_fname(p.save(fname)[0])
 
     def test_ipython_repr(self):
         self.profiles[0]._repr_html_()
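
These two hunks fix mixed idioms rather than the plain yield pattern: the old lines yielded or asserted the return value of assert_fname(...), a value that is not part of the helper's contract. The direct call makes the check unambiguous:

    assert_fname(p.save(fname)[0])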

diff -r 2c35a84ba0ac19c0293e17d4b1df40aa51b40cd1 -r cb3e8d445eedf763d52756cd074856d4913aad3e yt/visualization/tests/test_splat.py
--- a/yt/visualization/tests/test_splat.py
+++ b/yt/visualization/tests/test_splat.py
@@ -50,9 +50,9 @@
     before_hash = image.copy()
     fn = 'tmp.png'
     yt.write_bitmap(image, fn)
-    yield assert_equal, os.path.exists(fn), True
+    assert_equal(os.path.exists(fn), True)
     os.remove(fn)
-    yield assert_equal, before_hash, image
+    assert_equal(before_hash, image)
 
     os.chdir(curdir)
     # clean up


https://bitbucket.org/yt_analysis/yt/commits/5fea4d11bff0/
Changeset:   5fea4d11bff0
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 18:58:09+00:00
Summary:     Avoid runtime warning from matplotlib in write_projection
Affected #:  1 file

diff -r cb3e8d445eedf763d52756cd074856d4913aad3e -r 5fea4d11bff0210a48b137f56babc5151553ba1e yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -407,8 +407,6 @@
         if colorbar_label:
             cbar.ax.set_ylabel(colorbar_label)
 
-    fig.tight_layout()
-        
     suffix = get_image_suffix(filename)
 
     if suffix == '':
@@ -425,6 +423,8 @@
         mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
         canvas = FigureCanvasAgg(fig)
 
+    fig.tight_layout()
+
     canvas.print_figure(filename, dpi=dpi)
     return filename
 

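The reordering matters because tight_layout needs a renderer: in some matplotlib versions, calling fig.tight_layout() before any canvas is attached triggers a warning while matplotlib falls back to a default renderer. A standalone sketch of the safe ordering (toy figure, Agg backend):

    import matplotlib
    matplotlib.use("Agg")
    from matplotlib.figure import Figure
    from matplotlib.backends.backend_agg import FigureCanvasAgg

    fig = Figure()
    ax = fig.add_subplot(111)
    ax.plot([0, 1], [0, 1])

    canvas = FigureCanvasAgg(fig)   # attach the canvas first...
    fig.tight_layout()              # ...then compute the layout
    canvas.print_figure("tmp.png", dpi=100)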

https://bitbucket.org/yt_analysis/yt/commits/53ca35e91c60/
Changeset:   53ca35e91c60
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 18:59:28+00:00
Summary:     eliminate yield assert from yt.fields
Affected #:  1 file

diff -r 5fea4d11bff0210a48b137f56babc5151553ba1e -r 53ca35e91c6087080b1d806f3f29abdab4cfbe31 yt/fields/tests/test_magnetic_fields.py
--- a/yt/fields/tests/test_magnetic_fields.py
+++ b/yt/fields/tests/test_magnetic_fields.py
@@ -43,10 +43,10 @@
              dd2["magnetic_field_z"]**2)/(2.0*mu_0)
     emag2.convert_to_units("Pa")
 
-    yield assert_almost_equal, emag1, dd1["magnetic_energy"]
-    yield assert_almost_equal, emag2, dd2["magnetic_energy"]
+    assert_almost_equal(emag1, dd1["magnetic_energy"])
+    assert_almost_equal(emag2, dd2["magnetic_energy"])
 
     assert str(emag1.units) == str(dd1["magnetic_energy"].units)
     assert str(emag2.units) == str(dd2["magnetic_energy"].units)
 
-    yield assert_almost_equal, emag1.in_cgs(), emag2.in_cgs()
+    assert_almost_equal(emag1.in_cgs(), emag2.in_cgs())
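
The quantity under test is the magnetic energy density, B**2 / (2*mu_0) in SI units (equivalently B**2 / (8*pi) in Gaussian units). A plain-numpy illustration with assumed field strengths:

    import numpy as np

    mu_0 = 4.0e-7 * np.pi            # vacuum permeability, T*m/A
    B = np.array([1.0e-4, 2.0e-4])   # illustrative field magnitudes, tesla
    emag = B**2 / (2.0 * mu_0)       # energy density, pascals (J/m^3)
    print(emag)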


https://bitbucket.org/yt_analysis/yt/commits/4c384ace62dc/
Changeset:   4c384ace62dc
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 19:14:19+00:00
Summary:     Eliminate yield assert from yt.geometry
Affected #:  8 files

diff -r 53ca35e91c6087080b1d806f3f29abdab4cfbe31 -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 yt/geometry/coordinates/tests/test_cartesian_coordinates.py
--- a/yt/geometry/coordinates/tests/test_cartesian_coordinates.py
+++ b/yt/geometry/coordinates/tests/test_cartesian_coordinates.py
@@ -21,11 +21,10 @@
         fd = ("index", "d%s" % axis)
         fp = ("index", "path_element_%s" % axis)
         ma = np.argmax(dd[fi])
-        yield assert_equal, dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i]
+        assert_equal(dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i])
         mi = np.argmin(dd[fi])
-        yield assert_equal, dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i]
-        yield assert_equal, dd[fd].min(), ds.index.get_smallest_dx()
-        yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i]
-        yield assert_equal, dd[fd], dd[fp]
-    yield assert_equal, dd["cell_volume"].sum(dtype="float64"), \
-                        ds.domain_width.prod()
+        assert_equal(dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i])
+        assert_equal(dd[fd].min(), ds.index.get_smallest_dx())
+        assert_equal(dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i])
+        assert_equal(dd[fd], dd[fp])
+    assert_equal(dd["cell_volume"].sum(dtype="float64"), ds.domain_width.prod())

diff -r 53ca35e91c6087080b1d806f3f29abdab4cfbe31 -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 yt/geometry/coordinates/tests/test_cylindrical_coordinates.py
--- a/yt/geometry/coordinates/tests/test_cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/tests/test_cylindrical_coordinates.py
@@ -20,13 +20,13 @@
         fi = ("index", axis)
         fd = ("index", "d%s" % axis)
         ma = np.argmax(dd[fi])
-        yield assert_equal, dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d
+        assert_equal(dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d)
         mi = np.argmin(dd[fi])
-        yield assert_equal, dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d
-        yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d
-    yield assert_almost_equal, dd["cell_volume"].sum(dtype="float64"), \
-                        np.pi*ds.domain_width[0]**2 * ds.domain_width[1]
-    yield assert_equal, dd["index", "path_element_r"], dd["index", "dr"]
-    yield assert_equal, dd["index", "path_element_z"], dd["index", "dz"]
-    yield assert_equal, dd["index", "path_element_theta"], \
-                        dd["index", "r"] * dd["index", "dtheta"]
+        assert_equal(dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d)
+        assert_equal(dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d)
+    assert_almost_equal(dd["cell_volume"].sum(dtype="float64"),
+                        np.pi*ds.domain_width[0]**2 * ds.domain_width[1])
+    assert_equal(dd["index", "path_element_r"], dd["index", "dr"])
+    assert_equal(dd["index", "path_element_z"], dd["index", "dz"])
+    assert_equal(dd["index", "path_element_theta"],
+                 dd["index", "r"] * dd["index", "dtheta"])

diff -r 53ca35e91c6087080b1d806f3f29abdab4cfbe31 -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 yt/geometry/coordinates/tests/test_geographic_coordinates.py
--- a/yt/geometry/coordinates/tests/test_geographic_coordinates.py
+++ b/yt/geometry/coordinates/tests/test_geographic_coordinates.py
@@ -25,35 +25,27 @@
         fi = ("index", axis)
         fd = ("index", "d%s" % axis)
         ma = np.argmax(dd[fi])
-        yield assert_equal, dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d
+        assert_equal(dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d)
         mi = np.argmin(dd[fi])
-        yield assert_equal, dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d
-        yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d
+        assert_equal(dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d)
+        assert_equal(dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d)
     inner_r = ds.surface_height
     outer_r = ds.surface_height + ds.domain_width[2]
-    yield assert_equal, dd["index","dtheta"], \
-                        dd["index","dlatitude"]*np.pi/180.0
-    yield assert_equal, dd["index","dphi"], \
-                        dd["index","dlongitude"]*np.pi/180.0
+    assert_equal(dd["index","dtheta"], dd["index","dlatitude"]*np.pi/180.0)
+    assert_equal(dd["index","dphi"], dd["index","dlongitude"]*np.pi/180.0)
     # Note our terrible agreement here.
-    yield assert_rel_equal, dd["cell_volume"].sum(dtype="float64"), \
-                        (4.0/3.0) * np.pi * (outer_r**3 - inner_r**3), \
-                        3
-    yield assert_equal, dd["index", "path_element_altitude"], \
-                        dd["index", "daltitude"]
-    yield assert_equal, dd["index", "path_element_altitude"], \
-                        dd["index", "dr"]
+    assert_rel_equal(dd["cell_volume"].sum(dtype="float64"),
+                     (4.0/3.0) * np.pi * (outer_r**3 - inner_r**3), 3)
+    assert_equal(dd["index", "path_element_altitude"], dd["index", "daltitude"])
+    assert_equal(dd["index", "path_element_altitude"], dd["index", "dr"])
     # Note that latitude corresponds to theta, longitude to phi
-    yield assert_equal, dd["index", "path_element_latitude"], \
-                        dd["index", "r"] * \
-                        dd["index", "dlatitude"] * np.pi/180.0
-    yield assert_equal, dd["index", "path_element_longitude"], \
-                        dd["index", "r"] * \
-                        dd["index", "dlongitude"] * np.pi/180.0 * \
-                        np.sin((dd["index", "latitude"] + 90.0) * np.pi/180.0)
+    assert_equal(dd["index", "path_element_latitude"],
+                 dd["index", "r"] * dd["index", "dlatitude"] * np.pi/180.0)
+    assert_equal(dd["index", "path_element_longitude"],
+                 (dd["index", "r"] * dd["index", "dlongitude"] * np.pi/180.0 *
+                  np.sin((dd["index", "latitude"] + 90.0) * np.pi/180.0)))
     # We also want to check that our radius is correct
-    yield assert_equal, dd["index","r"], \
-                        dd["index","altitude"] + ds.surface_height
+    assert_equal(dd["index","r"], dd["index","altitude"] + ds.surface_height)
 
 def test_internal_geographic_coordinates():
     # We're going to load up a simple AMR grid and check its volume
@@ -70,30 +62,23 @@
         fi = ("index", axis)
         fd = ("index", "d%s" % axis)
         ma = np.argmax(dd[fi])
-        yield assert_equal, dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d
+        assert_equal(dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d)
         mi = np.argmin(dd[fi])
-        yield assert_equal, dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d
-        yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d
+        assert_equal(dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d)
+        assert_equal(dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d)
     inner_r = ds.outer_radius - ds.domain_right_edge[2]
     outer_r = ds.outer_radius
-    yield assert_equal, dd["index","dtheta"], \
-                        dd["index","dlatitude"]*np.pi/180.0
-    yield assert_equal, dd["index","dphi"], \
-                        dd["index","dlongitude"]*np.pi/180.0
-    yield assert_rel_equal, dd["cell_volume"].sum(dtype="float64"), \
-                        (4.0/3.0) * np.pi * (outer_r**3 - inner_r**3), 10
-    yield assert_equal, dd["index", "path_element_depth"], \
-                        dd["index", "ddepth"]
-    yield assert_equal, dd["index", "path_element_depth"], \
-                        dd["index", "dr"]
+    assert_equal(dd["index","dtheta"], dd["index","dlatitude"]*np.pi/180.0)
+    assert_equal(dd["index","dphi"], dd["index","dlongitude"]*np.pi/180.0)
+    assert_rel_equal(dd["cell_volume"].sum(dtype="float64"),
+                     (4.0/3.0) * np.pi * (outer_r**3 - inner_r**3), 10)
+    assert_equal(dd["index", "path_element_depth"], dd["index", "ddepth"])
+    assert_equal(dd["index", "path_element_depth"], dd["index", "dr"])
     # Note that latitude corresponds to theta, longitude to phi
-    yield assert_equal, dd["index", "path_element_latitude"], \
-                        dd["index", "r"] * \
-                        dd["index", "dlatitude"] * np.pi/180.0
-    yield assert_equal, dd["index", "path_element_longitude"], \
-                        dd["index", "r"] * \
-                        dd["index", "dlongitude"] * np.pi/180.0 * \
-                        np.sin((dd["index", "latitude"] + 90.0) * np.pi/180.0)
+    assert_equal(dd["index", "path_element_latitude"],
+                 dd["index", "r"] * dd["index", "dlatitude"] * np.pi/180.0)
+    assert_equal(dd["index", "path_element_longitude"],
+                 (dd["index", "r"] * dd["index", "dlongitude"] * np.pi/180.0 *
+                  np.sin((dd["index", "latitude"] + 90.0) * np.pi/180.0)))
     # We also want to check that our radius is correct
-    yield assert_equal, dd["index","r"], \
-                        -1.0*dd["index","depth"] + ds.outer_radius
+    assert_equal(dd["index","r"], -1.0*dd["index","depth"] + ds.outer_radius)

diff -r 53ca35e91c6087080b1d806f3f29abdab4cfbe31 -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 yt/geometry/coordinates/tests/test_polar_coordinates.py
--- a/yt/geometry/coordinates/tests/test_polar_coordinates.py
+++ b/yt/geometry/coordinates/tests/test_polar_coordinates.py
@@ -21,13 +21,13 @@
         fi = ("index", axis)
         fd = ("index", "d%s" % axis)
         ma = np.argmax(dd[fi])
-        yield assert_equal, dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d
+        assert_equal(dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d)
         mi = np.argmin(dd[fi])
-        yield assert_equal, dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d
-        yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d
-    yield assert_almost_equal, dd["cell_volume"].sum(dtype="float64"), \
-                        np.pi*ds.domain_width[0]**2 * ds.domain_width[2]
-    yield assert_equal, dd["index", "path_element_r"], dd["index", "dr"]
-    yield assert_equal, dd["index", "path_element_z"], dd["index", "dz"]
-    yield assert_equal, dd["index", "path_element_theta"], \
-                        dd["index", "r"] * dd["index", "dtheta"]
+        assert_equal(dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d)
+        assert_equal(dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d)
+    assert_almost_equal(dd["cell_volume"].sum(dtype="float64"),
+                        np.pi*ds.domain_width[0]**2 * ds.domain_width[2])
+    assert_equal(dd["index", "path_element_r"], dd["index", "dr"])
+    assert_equal(dd["index", "path_element_z"], dd["index", "dz"])
+    assert_equal(dd["index", "path_element_theta"],
+                 dd["index", "r"] * dd["index", "dtheta"])

diff -r 53ca35e91c6087080b1d806f3f29abdab4cfbe31 -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 yt/geometry/coordinates/tests/test_spherical_coordinates.py
--- a/yt/geometry/coordinates/tests/test_spherical_coordinates.py
+++ b/yt/geometry/coordinates/tests/test_spherical_coordinates.py
@@ -20,19 +20,19 @@
         fi = ("index", axis)
         fd = ("index", "d%s" % axis)
         ma = np.argmax(dd[fi])
-        yield assert_equal, dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d
+        assert_equal(dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d)
         mi = np.argmin(dd[fi])
-        yield assert_equal, dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d
-        yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d
+        assert_equal(dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d)
+        assert_equal(dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d)
     # Note that we're using a lot of funny transforms to get to this, so we do
     # not expect to get actual agreement.  This is a bit of a shame, but I
     # don't think it is avoidable as of right now.  Real datasets will almost
     # certainly be correct, if this is correct to 3 decimal places.
-    yield assert_almost_equal, dd["cell_volume"].sum(dtype="float64"), \
-                        (4.0/3.0) * np.pi*ds.domain_width[0]**3
-    yield assert_equal, dd["index", "path_element_r"], dd["index", "dr"]
-    yield assert_equal, dd["index", "path_element_theta"], \
-                        dd["index", "r"] * dd["index", "dtheta"]
-    yield assert_equal, dd["index", "path_element_phi"], \
-                        dd["index", "r"] * dd["index", "dphi"] * \
-                          np.sin(dd["index","theta"])
+    assert_almost_equal(dd["cell_volume"].sum(dtype="float64"),
+                        (4.0/3.0) * np.pi*ds.domain_width[0]**3)
+    assert_equal(dd["index", "path_element_r"], dd["index", "dr"])
+    assert_equal(dd["index", "path_element_theta"],
+                 dd["index", "r"] * dd["index", "dtheta"])
+    assert_equal(dd["index", "path_element_phi"],
+                 (dd["index", "r"] * dd["index", "dphi"] *
+                  np.sin(dd["index","theta"])))
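
The cell-volume assertion integrates the spherical volume element r**2 sin(theta) dr dtheta dphi over the whole domain, which should approach (4/3)*pi*R**3. A quick standalone check of that identity on a uniform midpoint grid:

    import numpy as np

    R, n = 1.0, 64
    r = (np.arange(n) + 0.5) * R / n
    theta = (np.arange(n) + 0.5) * np.pi / n
    dr, dtheta, dphi = R / n, np.pi / n, 2 * np.pi / n

    rr, tt = np.meshgrid(r, theta, indexing="ij")
    # the phi integrand is constant, so sum one slice and multiply by n cells
    vol = (rr**2 * np.sin(tt) * dr * dtheta * dphi).sum() * n
    print(vol, 4.0 / 3.0 * np.pi * R**3)   # closely agree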

diff -r 53ca35e91c6087080b1d806f3f29abdab4cfbe31 -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 yt/geometry/tests/test_grid_container.py
--- a/yt/geometry/tests/test_grid_container.py
+++ b/yt/geometry/tests/test_grid_container.py
@@ -57,15 +57,15 @@
     grid_indices = [grid.id - grid._id_offset for grid in test_ds.index.grids]
     grid_nchild = [len(grid.Children) for grid in test_ds.index.grids]
 
-    yield assert_equal, levels, grid_levels
-    yield assert_equal, indices, grid_indices
-    yield assert_equal, nchild, grid_nchild
+    assert_equal(levels, grid_levels)
+    assert_equal(indices, grid_indices)
+    assert_equal(nchild, grid_nchild)
 
     for i, grid in enumerate(test_ds.index.grids):
         if grid_nchild[i] > 0:
             grid_children = np.array([child.id - child._id_offset
                                       for child in grid.Children])
-            yield assert_equal, grid_children, children[i]
+            assert_equal(grid_children, children[i])
 
 def test_find_points():
     """Main test suite for MatchPoints"""
@@ -98,30 +98,29 @@
                 pt_level = grid.Level
                 grid_inds[ind] = grid.id - grid._id_offset
 
-    yield assert_equal, point_grid_inds, grid_inds
+    assert_equal(point_grid_inds, grid_inds)
 
     # Test whether find_points works for lists
     point_grids, point_grid_inds = test_ds.index._find_points(randx.tolist(),
                                                               randy.tolist(),
                                                               randz.tolist())
-    yield assert_equal, point_grid_inds, grid_inds
+    assert_equal(point_grid_inds, grid_inds)
 
     # Test if find_points works for scalar
     ind = random.randint(0, num_points - 1)
     point_grids, point_grid_inds = test_ds.index._find_points(randx[ind],
                                                               randy[ind],
                                                               randz[ind])
-    yield assert_equal, point_grid_inds, grid_inds[ind]
+    assert_equal(point_grid_inds, grid_inds[ind])
 
     # Test if find_points fails properly for non equal indices' array sizes
-    yield assert_raises, AssertionError, test_ds.index._find_points, \
-        [0], 1.0, [2, 3]
+    assert_raises(AssertionError, test_ds.index._find_points, [0], 1.0, [2, 3])
 
 def test_grid_arrays_view():
     ds = setup_test_ds()
     tree = ds.index._get_grid_tree()
     grid_arr = tree.grid_arrays
-    yield assert_equal, grid_arr['left_edge'], ds.index.grid_left_edge
-    yield assert_equal, grid_arr['right_edge'], ds.index.grid_right_edge
-    yield assert_equal, grid_arr['dims'], ds.index.grid_dimensions
-    yield assert_equal, grid_arr['level'], ds.index.grid_levels[:,0]
+    assert_equal(grid_arr['left_edge'], ds.index.grid_left_edge)
+    assert_equal(grid_arr['right_edge'], ds.index.grid_right_edge)
+    assert_equal(grid_arr['dims'], ds.index.grid_dimensions)
+    assert_equal(grid_arr['level'], ds.index.grid_levels[:,0])

diff -r 53ca35e91c6087080b1d806f3f29abdab4cfbe31 -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 yt/geometry/tests/test_neighbor_search.py
--- a/yt/geometry/tests/test_neighbor_search.py
+++ b/yt/geometry/tests/test_neighbor_search.py
@@ -54,5 +54,5 @@
         min_in[i] = np.argmin(np.abs(radius - nearest_neighbors[i]))
         #if i == 34: raise RuntimeError
         #dd.field_data.pop(("all", "particle_radius"))
-    yield assert_equal, (min_in == 63).sum(), min_in.size
-    yield assert_array_almost_equal, nearest_neighbors, all_neighbors
+    assert_equal((min_in == 63).sum(), min_in.size)
+    assert_array_almost_equal(nearest_neighbors, all_neighbors)

diff -r 53ca35e91c6087080b1d806f3f29abdab4cfbe31 -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -61,11 +61,11 @@
         total_count = np.zeros(len(tc), dtype="int32")
         for i in sorted(tc):
             total_count[i] = tc[i]
-        yield assert_equal, octree.nocts, total_count.sum()
+        assert_equal(octree.nocts, total_count.sum())
         # This visits every cell -- including those covered by octs.
         #for dom in range(ndom):
         #    level_count += octree.count_levels(total_count.size-1, dom, mask)
-        yield assert_equal, total_count, [1, 8, 64, 64, 256, 536, 1856, 1672]
+        assert_equal(total_count, [1, 8, 64, 64, 256, 536, 1856, 1672])
 
 def test_save_load_octree():
     np.random.seed(int(0x4d3d3d3))
@@ -85,15 +85,15 @@
     always = AlwaysSelector(None)
     ir1 = octree.ires(always)
     ir2 = loaded.ires(always)
-    yield assert_equal, ir1, ir2
+    assert_equal(ir1, ir2)
 
     fc1 = octree.fcoords(always)
     fc2 = loaded.fcoords(always)
-    yield assert_equal, fc1, fc2
+    assert_equal(fc1, fc2)
 
     fw1 = octree.fwidth(always)
     fw2 = loaded.fwidth(always)
-    yield assert_equal, fw1, fw2
+    assert_equal(fw1, fw2)
 
 def test_particle_octree_counts():
     np.random.seed(int(0x4d3d3d3))
@@ -112,9 +112,9 @@
         dd = ds.all_data()
         bi = dd["io","mesh_id"]
         v = np.bincount(bi.astype("intp"))
-        yield assert_equal, v.max() <= n_ref, True
+        assert_equal(v.max() <= n_ref, True)
         bi2 = dd["all","mesh_id"]
-        yield assert_equal, bi, bi2
+        assert_equal(bi, bi2)
 
 def test_particle_overrefine():
     np.random.seed(int(0x4d3d3d3))
@@ -140,9 +140,9 @@
             dd2 = ds2.all_data()
             v2 = dict((a, getattr(dd2, a)) for a in _attrs)
             for a in sorted(v1):
-                yield assert_equal, v1[a].size * f, v2[a].size
+                assert_equal(v1[a].size * f, v2[a].size)
             cv2 = dd2["cell_volume"].sum(dtype="float64")
-            yield assert_equal, cv1, cv2
+            assert_equal(cv1, cv2)
 
 index_ptype_snap = "snapshot_033/snap_033.0.hdf5"
 @requires_file(index_ptype_snap)
@@ -156,8 +156,8 @@
     cv = dd["cell_volume"]
     cv_all = dd_all["cell_volume"]
     cv_pt0 = dd_pt0["cell_volume"]
-    yield assert_equal, cv.shape, cv_all.shape
-    yield assert_equal, cv.sum(dtype="float64"), cv_pt0.sum(dtype="float64")
+    assert_equal(cv.shape, cv_all.shape)
+    assert_equal(cv.sum(dtype="float64"), cv_pt0.sum(dtype="float64"))
 
 class FakeDS:
     domain_left_edge = None
@@ -209,20 +209,15 @@
             fr.set_edges(i)
             selector = RegionSelector(fr)
             df = reg.identify_data_files(selector)
-            yield assert_equal, len(df), 1
-            yield assert_equal, df[0], i
+            assert_equal(len(df), 1)
+            assert_equal(df[0], i)
             pos[:,0] += 1.0
 
         for mask in reg.masks:
             maxs = np.unique(mask.max(axis=-1).max(axis=-1))
             mins = np.unique(mask.min(axis=-1).min(axis=-1))
-            yield assert_equal, maxs, mins
-            yield assert_equal, maxs, np.unique(mask)
-
-if __name__=="__main__":
-    for i in test_add_particles_random():
-        i[0](*i[1:])
-    time.sleep(1)
+            assert_equal(maxs, mins)
+            assert_equal(maxs, np.unique(mask))
 
 def test_position_location():
     np.random.seed(int(0x4d3d3d3))
@@ -246,5 +241,6 @@
 @requires_file(os33)
 def test_get_smallest_dx():
     ds = yt.load(os33)
-    yield assert_equal, ds.index.get_smallest_dx(), \
-        ds.domain_width / (ds.domain_dimensions*2.**(ds.index.max_level))
+    small_dx = (
+        ds.domain_width / (ds.domain_dimensions*2.**(ds.index.max_level)))
+    assert_equal(ds.index.get_smallest_dx(), small_dx)
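
Elsewhere in these octree tests the n_ref invariant is enforced by tallying particles per mesh cell with np.bincount and requiring that no cell exceed the refinement threshold. A toy version of that check (synthetic mesh ids):

    import numpy as np
    from numpy.testing import assert_equal

    n_ref = 64
    mesh_id = np.repeat(np.arange(100), 10)   # 10 particles in each of 100 cells
    v = np.bincount(mesh_id.astype("intp"))
    assert_equal(v.max() <= n_ref, True)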


https://bitbucket.org/yt_analysis/yt/commits/a7792a531e66/
Changeset:   a7792a531e66
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 20:03:07+00:00
Summary:     remove yield assert from yt.utilities
Affected #:  12 files

diff -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 -r a7792a531e66950baf09b5727bb627e8bb193040 yt/utilities/lib/tests/test_alt_ray_tracers.py
--- a/yt/utilities/lib/tests/test_alt_ray_tracers.py
+++ b/yt/utilities/lib/tests/test_alt_ray_tracers.py
@@ -65,20 +65,20 @@
         t, s, rztheta, inds = cylindrical_ray_trace(p1, p2, left_grid, right_grid)
         npoints = len(t)
 
-        yield check_monotonic_inc, t
-        yield assert_less_equal, 0.0, t[0]
-        yield assert_less_equal, t[-1], 1.0
+        check_monotonic_inc(t)
+        assert_less_equal(0.0, t[0])
+        assert_less_equal(t[-1], 1.0)
 
-        yield check_monotonic_inc, s
-        yield assert_less_equal, 0.0, s[0]
-        yield assert_less_equal, s[-1], pathlen
-        yield assert_equal, npoints, len(s)
+        check_monotonic_inc(s)
+        assert_less_equal(0.0, s[0])
+        assert_less_equal(s[-1], pathlen)
+        assert_equal(npoints, len(s))
 
-        yield assert_equal, (npoints, 3), rztheta.shape
-        yield check_bounds, rztheta[:,0],  0.0, 1.0
-        yield check_bounds, rztheta[:,1], -1.0, 1.0
-        yield check_bounds, rztheta[:,2],  0.0, 2*np.pi
-        yield check_monotonic_inc, rztheta[:,2]
+        assert_equal((npoints, 3), rztheta.shape)
+        check_bounds(rztheta[:,0],  0.0, 1.0)
+        check_bounds(rztheta[:,1], -1.0, 1.0)
+        check_bounds(rztheta[:,2],  0.0, 2*np.pi)
+        check_monotonic_inc(rztheta[:,2])
 
-        yield assert_equal, npoints, len(inds)
-        yield check_bounds, inds, 0, len(left_grid)-1
+        assert_equal(npoints, len(inds))
+        check_bounds(inds, 0, len(left_grid)-1)
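
The helpers used here, check_monotonic_inc and check_bounds, encode simple invariants on the ray parameterization: t and s must be non-decreasing and stay inside their intervals. A sketch of what such helpers amount to (bodies assumed, names as above):

    import numpy as np

    def check_monotonic_inc(a):
        assert np.all(np.diff(a) >= 0)

    def check_bounds(a, lo, hi):
        assert np.all((lo <= a) & (a <= hi))

    t = np.linspace(0.0, 1.0, 16)
    check_monotonic_inc(t)
    check_bounds(t, 0.0, 1.0)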

diff -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 -r a7792a531e66950baf09b5727bb627e8bb193040 yt/utilities/lib/tests/test_bitarray.py
--- a/yt/utilities/lib/tests/test_bitarray.py
+++ b/yt/utilities/lib/tests/test_bitarray.py
@@ -10,21 +10,21 @@
         arr_in = (np.random.random(32**3 + i) > 0.5)
         b = ba.bitarray(arr = arr_in)
         if i > 0:
-            yield assert_equal, b.ibuf.size, (32**3)/8.0 + 1
+            assert_equal(b.ibuf.size, (32**3)/8.0 + 1)
         arr_out = b.as_bool_array()
-        yield assert_equal, arr_in, arr_out
+        assert_equal(arr_in, arr_out)
 
         # Let's check we can do it without feeding it at first
         b = ba.bitarray(size = arr_in.size)
         b.set_from_array(arr_in)
         arr_out = b.as_bool_array()
-        yield assert_equal, arr_in, arr_out
+        assert_equal(arr_in, arr_out)
 
     # Try a big array
     arr_in = (np.random.random(32**3 + i) > 0.5)
     b = ba.bitarray(arr = arr_in)
     arr_out = b.as_bool_array()
-    yield assert_equal, arr_in, arr_out
+    assert_equal(arr_in, arr_out)
 
     # Let's check we can do something interesting.
     arr_in1 = (np.random.random(32**3) > 0.5)
@@ -32,11 +32,11 @@
     b1 = ba.bitarray(arr = arr_in1)
     b2 = ba.bitarray(arr = arr_in2)
     b3 = ba.bitarray(arr = (arr_in1 & arr_in2))
-    yield assert_equal, (b1.ibuf & b2.ibuf), b3.ibuf
+    assert_equal((b1.ibuf & b2.ibuf), b3.ibuf)
 
     b = ba.bitarray(10)
     for i in range(10):
         b.set_value(i, 2) # 2 should evaluate to True
         arr = b.as_bool_array()
-        yield assert_equal, arr[:i+1].all(), True
-        yield assert_equal, arr[i+1:].any(), False
+        assert_equal(arr[:i+1].all(), True)
+        assert_equal(arr[i+1:].any(), False)
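
The size assertion reflects plain bit packing: 32**3 booleans occupy 32**3/8 bytes, plus one extra byte when the bit count is not a multiple of 8. The same arithmetic via numpy's packer:

    import numpy as np

    arr = np.random.random(32**3 + 1) > 0.5
    packed = np.packbits(arr)
    assert packed.size == 32**3 // 8 + 1   # 4097 bytes for 32769 bits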

diff -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 -r a7792a531e66950baf09b5727bb627e8bb193040 yt/utilities/lib/tests/test_ragged_arrays.py
--- a/yt/utilities/lib/tests/test_ragged_arrays.py
+++ b/yt/utilities/lib/tests/test_ragged_arrays.py
@@ -38,10 +38,10 @@
                 arr = values[indices[i:i+v]]
                 if dtype == "float32":
                     # Numpy 1.9.1 changes the accumulator type to promote
-                    yield assert_rel_equal, op(arr), out_values[j], 6
+                    assert_rel_equal(op(arr), out_values[j], 6)
                 elif dtype == "float64":
                     # Numpy 1.9.1 changes the accumulator type to promote
-                    yield assert_rel_equal, op(arr), out_values[j], 12
+                    assert_rel_equal(op(arr), out_values[j], 12)
                 else:
-                    yield assert_equal, op(arr), out_values[j]
+                    assert_equal(op(arr), out_values[j])
                 i += v

diff -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 -r a7792a531e66950baf09b5727bb627e8bb193040 yt/utilities/tests/test_amr_kdtree.py
--- a/yt/utilities/tests/test_amr_kdtree.py
+++ b/yt/utilities/tests/test_amr_kdtree.py
@@ -35,12 +35,11 @@
     kd = AMRKDTree(ds)
 
     volume = kd.count_volume()
-    yield assert_equal, volume, \
-        np.prod(ds.domain_right_edge - ds.domain_left_edge)
+    assert_equal(volume, np.prod(ds.domain_right_edge - ds.domain_left_edge))
 
     cells = kd.count_cells()
     true_cells = ds.all_data().quantities['TotalQuantity']('Ones')[0]
-    yield assert_equal, cells, true_cells
+    assert_equal(cells, true_cells)
 
     # This largely reproduces the AMRKDTree.tree.check_tree() functionality
     tree_ok = True
@@ -59,7 +58,7 @@
         tree_ok *= np.all(grid.RightEdge >= nre)
         tree_ok *= np.all(dims > 0)
 
-    yield assert_equal, True, tree_ok
+    assert_equal(True, tree_ok)
 
 def test_amr_kdtree_set_fields():
     ds = fake_amr_ds(fields=["density", "pressure"])

diff -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 -r a7792a531e66950baf09b5727bb627e8bb193040 yt/utilities/tests/test_chemical_formulas.py
--- a/yt/utilities/tests/test_chemical_formulas.py
+++ b/yt/utilities/tests/test_chemical_formulas.py
@@ -16,8 +16,8 @@
     for formula, components, charge in _molecules:
         f = ChemicalFormula(formula)
         w = sum( n * periodic_table[e].weight for e, n in components)
-        yield assert_equal, f.charge, charge
-        yield assert_equal, f.weight, w
+        assert_equal(f.charge, charge)
+        assert_equal(f.weight, w)
         for (n, c1), (e, c2) in zip(components, f.elements):
-            yield assert_equal, n, e.symbol
-            yield assert_equal, c1, c2
+            assert_equal(n, e.symbol)
+            assert_equal(c1, c2)
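
The formula checks reduce to one identity: a parsed formula's weight is the sum of its constituents' atomic weights. A hedged example against yt's periodic table (assuming "H2O" is among the parsed forms, as in the test's _molecules list):

    from yt.utilities.chemical_formulas import ChemicalFormula
    from yt.utilities.periodic_table import periodic_table

    f = ChemicalFormula("H2O")
    w = 2 * periodic_table["H"].weight + periodic_table["O"].weight
    assert abs(f.weight - w) < 1e-12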

diff -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 -r a7792a531e66950baf09b5727bb627e8bb193040 yt/utilities/tests/test_cosmology.py
--- a/yt/utilities/tests/test_cosmology.py
+++ b/yt/utilities/tests/test_cosmology.py
@@ -30,7 +30,7 @@
         co = Cosmology()
         # random sample over interval (-1,100]
         z = -101 * np.random.random() + 100
-        yield assert_rel_equal, co.hubble_time(z), co.t_from_z(z), 5
+        assert_rel_equal(co.hubble_time(z), co.t_from_z(z), 5)
 
 def test_z_t_conversion():
     """
@@ -44,4 +44,4 @@
         z1 = -101 * np.random.random() + 100
         t = co.t_from_z(z1)
         z2 = co.z_from_t(t)
-        yield assert_rel_equal, z1, z2, 10
+        assert_rel_equal(z1, z2, 10)
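
The conversion test is a round-trip property: t_from_z followed by z_from_t should reproduce the input redshift to roughly ten significant digits. The same property in isolation:

    from yt.utilities.cosmology import Cosmology

    co = Cosmology()
    z1 = 3.0
    z2 = co.z_from_t(co.t_from_z(z1))
    assert abs(z1 - z2) / abs(z1) < 1e-9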

diff -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 -r a7792a531e66950baf09b5727bb627e8bb193040 yt/utilities/tests/test_interpolators.py
--- a/yt/utilities/tests/test_interpolators.py
+++ b/yt/utilities/tests/test_interpolators.py
@@ -17,7 +17,7 @@
     fv = {'x': np.mgrid[0.0:1.0:64j]}
     # evenly spaced bins
     ufi = lin.UnilinearFieldInterpolator(random_data, (0.0, 1.0), "x", True)
-    yield assert_array_equal, ufi(fv), random_data
+    assert_array_equal(ufi(fv), random_data)
     
     # randomly spaced bins
     size = 64
@@ -26,7 +26,7 @@
     ufi = lin.UnilinearFieldInterpolator(random_data, 
                                          np.linspace(0.0, 1.0, size) + shift, 
                                          "x", True)
-    yield assert_array_almost_equal, ufi(fv), random_data, 15
+    assert_array_almost_equal(ufi(fv), random_data, 15)
 
 def test_linear_interpolator_2d():
     random_data = np.random.random((64, 64))
@@ -35,7 +35,7 @@
                np.mgrid[0.0:1.0:64j, 0.0:1.0:64j]))
     bfi = lin.BilinearFieldInterpolator(random_data,
             (0.0, 1.0, 0.0, 1.0), "xy", True)
-    yield assert_array_equal, bfi(fv), random_data
+    assert_array_equal(bfi(fv), random_data)
 
     # randomly spaced bins
     size = 64
@@ -46,7 +46,7 @@
     fv["y"] += shifts["y"]
     bfi = lin.BilinearFieldInterpolator(random_data,
             (bins + shifts["x"], bins + shifts["y"]), "xy", True)
-    yield assert_array_almost_equal, bfi(fv), random_data, 15
+    assert_array_almost_equal(bfi(fv), random_data, 15)
 
 def test_linear_interpolator_3d():
     random_data = np.random.random((64, 64, 64))
@@ -55,7 +55,7 @@
                np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]))
     tfi = lin.TrilinearFieldInterpolator(random_data,
             (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyz", True)
-    yield assert_array_almost_equal, tfi(fv), random_data
+    assert_array_almost_equal(tfi(fv), random_data)
 
     # randomly spaced bins
     size = 64
@@ -68,7 +68,7 @@
     tfi = lin.TrilinearFieldInterpolator(random_data,
             (bins + shifts["x"], bins + shifts["y"], 
              bins + shifts["z"]), "xyz", True)
-    yield assert_array_almost_equal, tfi(fv), random_data, 15
+    assert_array_almost_equal(tfi(fv), random_data, 15)
     
 
 def test_ghost_zone_extrapolation():
@@ -98,9 +98,9 @@
                                   xz, np.array([0.0, 0.0, 0.0], dtype="f8"))
 
         ii = (lx, ly, lz)[i]
-        yield assert_array_equal, ii, vec[ax]
-        yield assert_array_equal, ii, xi
-        yield assert_array_equal, ii, xz
+        assert_array_equal(ii, vec[ax])
+        assert_array_equal(ii, xi)
+        assert_array_equal(ii, xz)
 
 
 def test_get_vertex_centered_data():

diff -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 -r a7792a531e66950baf09b5727bb627e8bb193040 yt/utilities/tests/test_minimal_representation.py
--- a/yt/utilities/tests/test_minimal_representation.py
+++ b/yt/utilities/tests/test_minimal_representation.py
@@ -29,18 +29,18 @@
     proj2 = ds.proj(field, "z", data_source=sp)
 
     proj1_c = ds.proj(field, "z")
-    yield assert_equal, proj1[field], proj1_c[field]
+    assert_equal(proj1[field], proj1_c[field])
 
     proj2_c = ds.proj(field, "z", data_source=sp)
-    yield assert_equal, proj2[field], proj2_c[field]
+    assert_equal(proj2[field], proj2_c[field])
 
     def fail_for_different_method():
         proj2_c = ds.proj(field, "z", data_source=sp, method="mip")
         return (proj2[field] == proj2_c[field]).all()
-    yield assert_raises, YTUnitOperationError, fail_for_different_method
+    assert_raises(YTUnitOperationError, fail_for_different_method)
 
     def fail_for_different_source():
         sp = ds.sphere(ds.domain_center, (2, 'kpc'))
         proj2_c = ds.proj(field, "z", data_source=sp, method="integrate")
         return assert_equal(proj2_c[field], proj2[field])
-    yield assert_raises, AssertionError, fail_for_different_source
+    assert_raises(AssertionError, fail_for_different_source)

diff -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 -r a7792a531e66950baf09b5727bb627e8bb193040 yt/utilities/tests/test_particle_generator.py
--- a/yt/utilities/tests/test_particle_generator.py
+++ b/yt/utilities/tests/test_particle_generator.py
@@ -44,9 +44,9 @@
     # Test to make sure we ended up with the right number of particles per grid
     particles1.apply_to_stream()
     particles_per_grid1 = [grid.NumberOfParticles for grid in ds.index.grids]
-    yield assert_equal, particles_per_grid1, particles1.NumberOfParticles
+    assert_equal(particles_per_grid1, particles1.NumberOfParticles)
     particles_per_grid1 = [len(grid["particle_position_x"]) for grid in ds.index.grids]
-    yield assert_equal, particles_per_grid1, particles1.NumberOfParticles
+    assert_equal(particles_per_grid1, particles1.NumberOfParticles)
 
     tags = uconcatenate([grid["particle_index"] for grid in ds.index.grids])
     assert(np.unique(tags).size == num_particles)
@@ -81,23 +81,23 @@
     #Test the number of particles again
     particles2.apply_to_stream()
     particles_per_grid2 = [grid.NumberOfParticles for grid in ds.index.grids]
-    yield assert_equal, particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles
+    assert_equal(particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles)
 
     [grid.field_data.clear() for grid in ds.index.grids]
     particles_per_grid2 = [len(grid["particle_position_x"]) for grid in ds.index.grids]
-    yield assert_equal, particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles
+    assert_equal(particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles)
 
     #Test the uniqueness of tags
     tags = np.concatenate([grid["particle_index"] for grid in ds.index.grids])
     tags.sort()
-    yield assert_equal, tags, np.arange((np.product(pdims)+num_particles))
+    assert_equal(tags, np.arange((np.product(pdims)+num_particles)))
 
     # Test that the old particles have zero for the new field
     old_particle_temps = [grid["particle_gas_temperature"][:particles_per_grid1[i]]
                           for i, grid in enumerate(ds.index.grids)]
     test_zeros = [np.zeros((particles_per_grid1[i])) 
                   for i, grid in enumerate(ds.index.grids)]
-    yield assert_equal, old_particle_temps, test_zeros
+    assert_equal(old_particle_temps, test_zeros)
 
     #Now dump all of these particle fields out into a dict
     pdata = {}
@@ -111,10 +111,6 @@
     
     #Test the number of particles again
     particles_per_grid3 = [grid.NumberOfParticles for grid in ds.index.grids]
-    yield assert_equal, particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles
+    assert_equal(particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles)
     particles_per_grid2 = [len(grid["particle_position_z"]) for grid in ds.index.grids]
-    yield assert_equal, particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles
-
-if __name__=="__main__":
-    for n, i in enumerate(test_particle_generator()):
-        i[0](*i[1:])
+    assert_equal(particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles)

diff -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 -r a7792a531e66950baf09b5727bb627e8bb193040 yt/utilities/tests/test_periodic_table.py
--- a/yt/utilities/tests/test_periodic_table.py
+++ b/yt/utilities/tests/test_periodic_table.py
@@ -9,9 +9,9 @@
         # If num == -1, then we are in one of the things like Deuterium or El
         # that are not elements by themselves.
         if num == -1: e0 = e1
-        yield assert_equal, id(e0), id(e1)
-        yield assert_equal, id(e0), id(e2)
-        yield assert_equal, e0.num, num
-        yield assert_equal, e0.weight, w
-        yield assert_equal, e0.name, name
-        yield assert_equal, e0.symbol, sym
+        assert_equal(id(e0), id(e1))
+        assert_equal(id(e0), id(e2))
+        assert_equal(e0.num, num)
+        assert_equal(e0.weight, w)
+        assert_equal(e0.name, name)
+        assert_equal(e0.symbol, sym)

diff -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 -r a7792a531e66950baf09b5727bb627e8bb193040 yt/utilities/tests/test_periodicity.py
--- a/yt/utilities/tests/test_periodicity.py
+++ b/yt/utilities/tests/test_periodicity.py
@@ -15,21 +15,21 @@
     b = [0.9,0.9,0.9]
     period = 1.
     dist = periodic_dist(a,b,period)
-    yield assert_almost_equal, dist, 0.34641016151377535
+    assert_almost_equal(dist, 0.34641016151377535)
     dist = periodic_dist(a, b, period, (True, False, False))
-    yield assert_almost_equal, dist, 1.1489125293076059
+    assert_almost_equal(dist, 1.1489125293076059)
     dist = periodic_dist(a, b, period, (False, True, False))
-    yield assert_almost_equal, dist, 1.1489125293076059
+    assert_almost_equal(dist, 1.1489125293076059)
     dist = periodic_dist(a, b, period, (False, False, True))
-    yield assert_almost_equal, dist, 1.1489125293076059
+    assert_almost_equal(dist, 1.1489125293076059)
     dist = periodic_dist(a, b, period, (True, True, False))
-    yield assert_almost_equal, dist, 0.84852813742385713
+    assert_almost_equal(dist, 0.84852813742385713)
     dist = periodic_dist(a, b, period, (True, False, True))
-    yield assert_almost_equal, dist, 0.84852813742385713
+    assert_almost_equal(dist, 0.84852813742385713)
     dist = periodic_dist(a, b, period, (False, True, True))
-    yield assert_almost_equal, dist, 0.84852813742385713
+    assert_almost_equal(dist, 0.84852813742385713)
     dist = euclidean_dist(a,b)
-    yield assert_almost_equal, dist, 1.3856406460551021
+    assert_almost_equal(dist, 1.3856406460551021)
 
     # Now test the more complicated cases where we're calculating radii based 
     # on data objects
@@ -43,12 +43,12 @@
     center = np.tile(np.reshape(np.array(c), (positions.shape[0],)+n_tup),(1,)+positions.shape[1:])
     
     dist = periodic_dist(positions, center, period, ds.periodicity)
-    yield assert_almost_equal, dist.min(), 0.00270632938683
-    yield assert_almost_equal, dist.max(), 0.863319074398
+    assert_almost_equal(dist.min(), 0.00270632938683)
+    assert_almost_equal(dist.max(), 0.863319074398)
 
     dist = euclidean_dist(positions, center)
-    yield assert_almost_equal, dist.min(), 0.00270632938683
-    yield assert_almost_equal, dist.max(), 1.54531407988
+    assert_almost_equal(dist.min(), 0.00270632938683)
+    assert_almost_equal(dist.max(), 1.54531407988)
 
     # Then grid-like data
     data = ds.index.grids[0]
@@ -58,11 +58,9 @@
     center = np.tile(np.reshape(np.array(c), (positions.shape[0],)+n_tup),(1,)+positions.shape[1:])
     
     dist = periodic_dist(positions, center, period, ds.periodicity)
-    yield assert_almost_equal, dist.min(), 0.00270632938683
-    yield assert_almost_equal, dist.max(), 0.863319074398
+    assert_almost_equal(dist.min(), 0.00270632938683)
+    assert_almost_equal(dist.max(), 0.863319074398)
     
     dist = euclidean_dist(positions, center)
-    yield assert_almost_equal, dist.min(), 0.00270632938683
-    yield assert_almost_equal, dist.max(), 1.54531407988
-
-
+    assert_almost_equal(dist.min(), 0.00270632938683)
+    assert_almost_equal(dist.max(), 1.54531407988)

diff -r 4c384ace62dcc2dbd5c2ad69317d43fe1ff7c532 -r a7792a531e66950baf09b5727bb627e8bb193040 yt/utilities/tests/test_selectors.py
--- a/yt/utilities/tests/test_selectors.py
+++ b/yt/utilities/tests/test_selectors.py
@@ -54,7 +54,7 @@
                              ds.domain_right_edge-ds.domain_left_edge,
                              ds.periodicity)
         # WARNING: this value has not been externally verified
-        yield assert_array_less, dist, 0.25
+        assert_array_less(dist, 0.25)
 
 def test_ellipsoid_selector():
     # generate fake data with a number of non-cubical grids
@@ -85,7 +85,7 @@
                              ds.domain_right_edge-ds.domain_left_edge,
                              ds.periodicity)
         # WARNING: this value has not been externally verified
-        yield assert_array_less, dist, ratios[0]
+        assert_array_less(dist, ratios[0])
 
     # aligned ellipsoid tests
     ratios = [0.25, 0.1, 0.1]
@@ -104,7 +104,7 @@
                                    ds.domain_right_edge-ds.domain_left_edge,
                                    ds.periodicity)/ratios[i])**2
         # WARNING: this value has not been externally verified
-        yield assert_array_less, dist2, 1.0
+        assert_array_less(dist2, 1.0)
 
 def test_slice_selector():
     # generate fake data with a number of non-cubical grids
@@ -116,9 +116,9 @@
             data = ds.slice(i, coord)
             data.get_data()
             v = data[d].to_ndarray()
-            yield assert_equal, data.shape[0], 64**2
-            yield assert_equal, data["ones"].shape[0], 64**2
-            yield assert_array_less, np.abs(v - coord), 1./128.+1e-6
+            assert_equal(data.shape[0], 64**2)
+            assert_equal(data["ones"].shape[0], 64**2)
+            assert_array_less(np.abs(v - coord), 1./128.+1e-6)
 
 def test_cutting_plane_selector():
     # generate fake data with a number of non-cubical grids
@@ -144,7 +144,7 @@
             cells1 = np.lexsort((data['x'],data['y'],data['z']))
             cells2 = np.lexsort((data2['x'],data2['y'],data2['z']))
             for d2 in 'xyz':
-                yield assert_equal, data[d2][cells1], data2[d2][cells2]
+                assert_equal(data[d2][cells1], data2[d2][cells2])
 
 #def test_region_selector():
 #
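
The same treatment applies to the yielded assert_raises tuples in the hunks
above (e.g. in test_minimal_representation): the tuple form becomes a direct
call. A minimal sketch of that conversion, with a hypothetical failing
callable:

    from numpy.testing import assert_raises

    def might_fail():
        raise ValueError("boom")

    # before: yield assert_raises, ValueError, might_fail
    # after: the call form runs the check immediately
    assert_raises(ValueError, might_fail)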


https://bitbucket.org/yt_analysis/yt/commits/d7bb28884e71/
Changeset:   d7bb28884e71
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 20:07:14+00:00
Summary:     eliminating yield assert from yt.analysis_modules
Affected #:  3 files
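
For context: nose treats a test function that yields as a generator test and
runs each yielded (callable, *args) tuple as a separate test case. The
conversion applied throughout this series replaces those tuples with direct
calls, at the cost of every assertion in a function now reporting as a single
test. An illustrative sketch of the pattern (not taken from any one hunk):

    from numpy.testing import assert_equal

    # before: a nose generator test; each yield is its own test case
    def test_squares_old():
        for x in range(3):
            yield assert_equal, x * x, x ** 2

    # after: a plain function; assertions run inline, in order
    def test_squares_new():
        for x in range(3):
            assert_equal(x * x, x ** 2)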

diff -r a7792a531e66950baf09b5727bb627e8bb193040 -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -301,7 +301,7 @@
 def test_voigt_profiles():
     a = 1.7e-4
     x = np.linspace(5.0, -3.6, 60)
-    yield assert_allclose_units, voigt_old(a, x), voigt_scipy(a, x), 1e-8
+    assert_allclose_units(voigt_old(a, x), voigt_scipy(a, x), 1e-8)
 
 @requires_file(GIZMO_PLUS)
 @requires_answer_testing()

diff -r a7792a531e66950baf09b5727bb627e8bb193040 -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 yt/analysis_modules/ppv_cube/tests/test_ppv.py
--- a/yt/analysis_modules/ppv_cube/tests/test_ppv.py
+++ b/yt/analysis_modules/ppv_cube/tests/test_ppv.py
@@ -46,7 +46,7 @@
     a = cube.data.mean(axis=(0,1)).v
     b = dv*np.exp(-((cube.vmid+v_shift)/v_th)**2)/(np.sqrt(np.pi)*v_th)
 
-    yield assert_allclose_units, a, b, 1.0e-2
+    assert_allclose_units(a, b, 1.0e-2)
 
     E_0 = 6.8*u.keV
 
@@ -58,4 +58,4 @@
 
     c = dE*np.exp(-((cube.vmid-E_shift)/delta_E)**2)/(np.sqrt(np.pi)*delta_E)
 
-    yield assert_allclose_units, a, c, 1.0e-2
+    assert_allclose_units(a, c, 1.0e-2)

diff -r a7792a531e66950baf09b5727bb627e8bb193040 -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -113,7 +113,8 @@
     deltaI = np.zeros((3,nx,ny))
     for i in range(3):
         deltaI[i,:,:] = full_szpack3d(ds, xinit[i])
-        yield assert_almost_equal, deltaI[i,:,:], np.array(szprj["%d_GHz" % int(freqs[i])]), 6
+        assert_almost_equal(
+            deltaI[i,:,:], np.array(szprj["%d_GHz" % int(freqs[i])]), 6)
 
 M7 = "DD0010/moving7_0010"
 @requires_module("SZpack")


https://bitbucket.org/yt_analysis/yt/commits/c05e448dfa7c/
Changeset:   c05e448dfa7c
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 20:19:45+00:00
Summary:     eliminate yield assert from yt.frontends
Affected #:  22 files
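
Note the mixed pattern in the frontend tests below: plain assertions become
direct calls, while the answer tests yielded from the same functions
(small_patch_amr, FieldValuesTest, and friends) are left as yields, so those
functions remain nose generators. The shape, as in the athena hunk:

    @requires_ds(cloud)
    def test_cloud():
        ds = data_dir_load(cloud)
        assert_equal(str(ds), "Cloud.0050")     # converted: runs inline
        for test in small_patch_amr(ds, _fields_cloud):
            test_cloud.__name__ = test.description
            yield test                          # unchanged: still yielded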

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/art/tests/test_outputs.py
--- a/yt/frontends/art/tests/test_outputs.py
+++ b/yt/frontends/art/tests/test_outputs.py
@@ -41,7 +41,7 @@
 def test_d9p():
     ds = data_dir_load(d9p)
     ds.index
-    yield assert_equal, str(ds), "10MpcBox_HartGal_csf_a0.500.d"
+    assert_equal(str(ds), "10MpcBox_HartGal_csf_a0.500.d")
     dso = [None, ("sphere", ("max", (0.1, 'unitary')))]
     for field in _fields:
         for axis in [0, 1, 2]:
@@ -56,18 +56,18 @@
     ad = ds.all_data()
     # 'Ana' variable values output from the ART Fortran 'ANA' analysis code
     AnaNStars = 6255
-    yield assert_equal, ad[('stars', 'particle_type')].size, AnaNStars
-    yield assert_equal, ad[('specie4', 'particle_type')].size, AnaNStars
+    assert_equal(ad[('stars', 'particle_type')].size, AnaNStars)
+    assert_equal(ad[('specie4', 'particle_type')].size, AnaNStars)
 
     # The *real* answer is 2833405, but yt misses one particle since it lives
     # on a domain boundary. See issue 814. When that is fixed, this test
     # will need to be updated
     AnaNDM = 2833404
-    yield assert_equal, ad[('darkmatter', 'particle_type')].size, AnaNDM
-    yield assert_equal, (ad[('specie0', 'particle_type')].size +
-                         ad[('specie1', 'particle_type')].size +
-                         ad[('specie2', 'particle_type')].size +
-                         ad[('specie3', 'particle_type')].size), AnaNDM
+    assert_equal(ad[('darkmatter', 'particle_type')].size, AnaNDM)
+    assert_equal((ad[('specie0', 'particle_type')].size +
+                  ad[('specie1', 'particle_type')].size +
+                  ad[('specie2', 'particle_type')].size +
+                  ad[('specie3', 'particle_type')].size), AnaNDM)
 
     for spnum in range(5):
         npart_read = ad['specie%s' % spnum, 'particle_type'].size
@@ -81,34 +81,33 @@
     AnaVolume = YTQuantity(364.640074656, 'Mpc**3')
     Volume = 1
     for i in ds.domain_width.in_units('Mpc'):
-        yield assert_almost_equal, i, AnaBoxSize
+        assert_almost_equal(i, AnaBoxSize)
         Volume *= i
-    yield assert_almost_equal, Volume, AnaVolume
+    assert_almost_equal(Volume, AnaVolume)
 
     AnaNCells = 4087490
-    yield assert_equal, len(ad[('index', 'cell_volume')]), AnaNCells
+    assert_equal(len(ad[('index', 'cell_volume')]), AnaNCells)
 
     AnaTotDMMass = YTQuantity(1.01191786808255e+14, 'Msun')
-    yield (assert_almost_equal,
-           ad[('darkmatter', 'particle_mass')].sum().in_units('Msun'),
-           AnaTotDMMass)
+    assert_almost_equal(
+        ad[('darkmatter', 'particle_mass')].sum().in_units('Msun'),
+        AnaTotDMMass)
 
     AnaTotStarMass = YTQuantity(1776701.3990607238, 'Msun')
-    yield (assert_almost_equal,
-           ad[('stars', 'particle_mass')].sum().in_units('Msun'),
-           AnaTotStarMass)
+    assert_almost_equal(ad[('stars', 'particle_mass')].sum().in_units('Msun'),
+                        AnaTotStarMass)
 
     AnaTotStarMassInitial = YTQuantity(2423468.2801332865, 'Msun')
-    yield (assert_almost_equal,
-           ad[('stars', 'particle_mass_initial')].sum().in_units('Msun'),
-           AnaTotStarMassInitial)
+    assert_almost_equal(
+        ad[('stars', 'particle_mass_initial')].sum().in_units('Msun'),
+        AnaTotStarMassInitial)
 
     AnaTotGasMass = YTQuantity(1.7826982029216785e+13, 'Msun')
-    yield (assert_almost_equal, ad[('gas', 'cell_mass')].sum().in_units('Msun'),
-           AnaTotGasMass)
+    assert_almost_equal(ad[('gas', 'cell_mass')].sum().in_units('Msun'),
+                        AnaTotGasMass)
 
     AnaTotTemp = YTQuantity(150219844793.39072, 'K')  # just leaves
-    yield assert_equal, ad[('gas', 'temperature')].sum(), AnaTotTemp
+    assert_equal(ad[('gas', 'temperature')].sum(), AnaTotTemp)
 
 
 @requires_file(d9p)

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -34,7 +34,7 @@
 def test_sizmbhloz():
     ds = data_dir_load(sizmbhloz)
     ds.max_range = 1024*1024
-    yield assert_equal, str(ds), "sizmbhloz-clref04SNth-rs9_a0.9011.art"
+    assert_equal(str(ds), "sizmbhloz-clref04SNth-rs9_a0.9011.art")
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
     for dobj_name in dso:
         for field in _fields:
@@ -47,7 +47,7 @@
         dobj = create_obj(ds, dobj_name)
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
-        yield assert_equal, s1, s2
+        assert_equal(s1, s2)
     assert_equal(ds.particle_type_counts, {'N-BODY': 100000, 'STAR': 110650})
 
 

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -33,7 +33,7 @@
 @requires_ds(cloud)
 def test_cloud():
     ds = data_dir_load(cloud)
-    yield assert_equal, str(ds), "Cloud.0050"
+    assert_equal(str(ds), "Cloud.0050")
     for test in small_patch_amr(ds, _fields_cloud):
         test_cloud.__name__ = test.description
         yield test
@@ -44,7 +44,7 @@
 @requires_ds(blast)
 def test_blast():
     ds = data_dir_load(blast)
-    yield assert_equal, str(ds), "Blast.0100"
+    assert_equal(str(ds), "Blast.0100")
     for test in small_patch_amr(ds, _fields_blast):
         test_blast.__name__ = test.description
         yield test
@@ -73,7 +73,7 @@
 @requires_ds(stripping, big_data=True)
 def test_stripping():
     ds = data_dir_load(stripping, kwargs={"units_override":uo_stripping})
-    yield assert_equal, str(ds), "rps.0062"
+    assert_equal(str(ds), "rps.0062")
     for test in small_patch_amr(ds, _fields_stripping):
         test_stripping.__name__ = test.description
         yield test
@@ -100,12 +100,16 @@
     assert_equal(ds3.time_unit, u.Myr)
     assert_equal(ds3.mass_unit, 1e14*u.Msun)
 
-    yield assert_equal, sp1.quantities.extrema("pressure"), sp2.quantities.extrema("pressure")
-    yield assert_allclose_units, sp1.quantities.total_quantity("pressure"), sp2.quantities.total_quantity("pressure")
+    assert_equal(sp1.quantities.extrema("pressure"),
+                 sp2.quantities.extrema("pressure"))
+    assert_allclose_units(sp1.quantities.total_quantity("pressure"),
+                          sp2.quantities.total_quantity("pressure"))
     for ax in "xyz":
-        yield assert_equal, sp1.quantities.extrema("velocity_%s" % ax), sp2.quantities.extrema("velocity_%s" % ax)
-    yield assert_allclose_units, sp1.quantities.bulk_velocity(), sp2.quantities.bulk_velocity()
-    yield assert_equal, prj1["density"], prj2["density"]
+        assert_equal(sp1.quantities.extrema("velocity_%s" % ax),
+                     sp2.quantities.extrema("velocity_%s" % ax))
+    assert_allclose_units(sp1.quantities.bulk_velocity(),
+                          sp2.quantities.bulk_velocity())
+    assert_equal(prj1["density"], prj2["density"])
 
     ytcfg["yt","skip_dataset_cache"] = "False"
 

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/boxlib/tests/test_outputs.py
--- a/yt/frontends/boxlib/tests/test_outputs.py
+++ b/yt/frontends/boxlib/tests/test_outputs.py
@@ -39,7 +39,7 @@
 @requires_ds(radadvect)
 def test_radadvect():
     ds = data_dir_load(radadvect)
-    yield assert_equal, str(ds), "plt00000"
+    assert_equal(str(ds), "plt00000")
     for test in small_patch_amr(ds, _orion_fields):
         test_radadvect.__name__ = test.description
         yield test
@@ -48,7 +48,7 @@
 @requires_ds(rt)
 def test_radtube():
     ds = data_dir_load(rt)
-    yield assert_equal, str(ds), "plt00500"
+    assert_equal(str(ds), "plt00500")
     for test in small_patch_amr(ds, _orion_fields):
         test_radtube.__name__ = test.description
         yield test
@@ -57,7 +57,7 @@
 @requires_ds(star)
 def test_star():
     ds = data_dir_load(star)
-    yield assert_equal, str(ds), "plrd01000"
+    assert_equal(str(ds), "plrd01000")
     for test in small_patch_amr(ds, _orion_fields):
         test_star.__name__ = test.description
         yield test
@@ -66,7 +66,7 @@
 @requires_ds(LyA)
 def test_LyA():
     ds = data_dir_load(LyA)
-    yield assert_equal, str(ds), "plt00000"
+    assert_equal(str(ds), "plt00000")
     for test in small_patch_amr(ds, _nyx_fields,
                                 input_center="c",
                                 input_weight="Ne"):
@@ -110,7 +110,7 @@
 @requires_ds(RT_particles)
 def test_RT_particles():
     ds = data_dir_load(RT_particles)
-    yield assert_equal, str(ds), "plt00050"
+    assert_equal(str(ds), "plt00050")
     for test in small_patch_amr(ds, _castro_fields):
         test_RT_particles.__name__ = test.description
         yield test
@@ -148,7 +148,7 @@
 @requires_ds(langmuir)
 def test_langmuir():
     ds = data_dir_load(langmuir)
-    yield assert_equal, str(ds), "plt00020_v2"
+    assert_equal(str(ds), "plt00020_v2")
     for test in small_patch_amr(ds, _warpx_fields, 
                                 input_center="c",
                                 input_weight="Ex"):
@@ -159,7 +159,7 @@
 @requires_ds(plasma)
 def test_plasma():
     ds = data_dir_load(plasma)
-    yield assert_equal, str(ds), "plt00030_v2"
+    assert_equal(str(ds), "plt00030_v2")
     for test in small_patch_amr(ds, _warpx_fields,
                                 input_center="c",
                                 input_weight="Ex"):

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -33,7 +33,7 @@
 @requires_ds(gc)
 def test_gc():
     ds = data_dir_load(gc)
-    yield assert_equal, str(ds), "data.0077.3d.hdf5"
+    assert_equal(str(ds), "data.0077.3d.hdf5")
     for test in small_patch_amr(ds, _fields):
         test_gc.__name__ = test.description
         yield test
@@ -42,7 +42,7 @@
 @requires_ds(tb)
 def test_tb():
     ds = data_dir_load(tb)
-    yield assert_equal, str(ds), "data.0005.3d.hdf5"
+    assert_equal(str(ds), "data.0005.3d.hdf5")
     for test in small_patch_amr(ds, _fields):
         test_tb.__name__ = test.description
         yield test
@@ -51,7 +51,7 @@
 @requires_ds(iso)
 def test_iso():
     ds = data_dir_load(iso)
-    yield assert_equal, str(ds), "data.0000.3d.hdf5"
+    assert_equal(str(ds), "data.0000.3d.hdf5")
     for test in small_patch_amr(ds, _fields):
         test_iso.__name__ = test.description
         yield test
@@ -61,7 +61,7 @@
 @requires_ds(zp)
 def test_zp():
     ds = data_dir_load(zp)
-    yield assert_equal, str(ds), "plt32.2d.hdf5"
+    assert_equal(str(ds), "plt32.2d.hdf5")
     for test in small_patch_amr(ds, _zp_fields, input_center="c",
                                 input_weight="rhs"):
         test_zp.__name__ = test.description
@@ -71,7 +71,7 @@
 @requires_ds(kho)
 def test_kho():
     ds = data_dir_load(kho)
-    yield assert_equal, str(ds), "data.0004.hdf5"
+    assert_equal(str(ds), "data.0004.hdf5")
     for test in small_patch_amr(ds, _fields):
         test_kho.__name__ = test.description
         yield test

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -71,7 +71,7 @@
 @requires_ds(m7)
 def test_moving7():
     ds = data_dir_load(m7)
-    yield assert_equal, str(ds), "moving7_0010"
+    assert_equal(str(ds), "moving7_0010")
     for test in small_patch_amr(m7, _fields):
         test_moving7.__name__ = test.description
         yield test
@@ -80,7 +80,7 @@
 def test_galaxy0030():
     ds = data_dir_load(g30)
     yield check_color_conservation(ds)
-    yield assert_equal, str(ds), "galaxy0030"
+    assert_equal(str(ds), "galaxy0030")
     for test in big_patch_amr(ds, _fields):
         test_galaxy0030.__name__ = test.description
         yield test
@@ -129,11 +129,11 @@
 def test_nuclei_density_fields():
     ds = data_dir_load(ecp)
     ad = ds.all_data()
-    yield assert_array_equal, ad["H_nuclei_density"], \
-      (ad["H_number_density"] + ad["H_p1_number_density"])
-    yield assert_array_equal, ad["He_nuclei_density"], \
-      (ad["He_number_density"] + ad["He_p1_number_density"] +
-       ad["He_p2_number_density"])
+    assert_array_equal(ad["H_nuclei_density"],
+                       (ad["H_number_density"] + ad["H_p1_number_density"]))
+    assert_array_equal(ad["He_nuclei_density"],
+        (ad["He_number_density"] +
+         ad["He_p1_number_density"] + ad["He_p2_number_density"]))
 
 @requires_file(enzotiny)
 def test_EnzoDataset():

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/exodus_ii/tests/test_outputs.py
--- a/yt/frontends/exodus_ii/tests/test_outputs.py
+++ b/yt/frontends/exodus_ii/tests/test_outputs.py
@@ -41,12 +41,12 @@
                   ('connect2', 'conv_marker'),
                   ('connect2', 'convected'),
                   ('connect2', 'diffused')]
-    yield assert_equal, str(ds), "out.e"
-    yield assert_equal, ds.dimensionality, 3
-    yield assert_equal, ds.current_time, 0.0
-    yield assert_array_equal, ds.parameters['nod_names'], ['convected', 'diffused']
-    yield assert_equal, ds.parameters['num_meshes'], 2
-    yield assert_array_equal, ds.field_list, field_list
+    assert_equal(str(ds), "out.e")
+    assert_equal(ds.dimensionality, 3)
+    assert_equal(ds.current_time, 0.0)
+    assert_array_equal(ds.parameters['nod_names'], ['convected', 'diffused'])
+    assert_equal(ds.parameters['num_meshes'], 2)
+    assert_array_equal(ds.field_list, field_list)
 
 out_s002 = "ExodusII/out.e-s002"
 
@@ -66,10 +66,10 @@
                   ('connect2', 'conv_marker'),
                   ('connect2', 'convected'),
                   ('connect2', 'diffused')]
-    yield assert_equal, str(ds), "out.e-s002"
-    yield assert_equal, ds.dimensionality, 3
-    yield assert_equal, ds.current_time, 2.0
-    yield assert_array_equal, ds.field_list, field_list
+    assert_equal(str(ds), "out.e-s002")
+    assert_equal(ds.dimensionality, 3)
+    assert_equal(ds.current_time, 2.0)
+    assert_array_equal(ds.field_list, field_list)
 
 gold = "ExodusII/gold.e"
 
@@ -78,8 +78,8 @@
 def test_gold():
     ds = data_dir_load(gold)
     field_list = [('all', 'forced'), ('connect1', 'forced')]
-    yield assert_equal, str(ds), "gold.e"
-    yield assert_array_equal, ds.field_list, field_list
+    assert_equal(str(ds), "gold.e")
+    assert_array_equal(ds.field_list, field_list)
 
 big_data = "MOOSE_sample_data/mps_out.e"
 

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/fits/tests/test_outputs.py
--- a/yt/frontends/fits/tests/test_outputs.py
+++ b/yt/frontends/fits/tests/test_outputs.py
@@ -29,7 +29,7 @@
 @requires_ds(grs)
 def test_grs():
     ds = data_dir_load(grs, cls=FITSDataset, kwargs={"nan_mask":0.0})
-    yield assert_equal, str(ds), "grs-50-cube.fits"
+    assert_equal(str(ds), "grs-50-cube.fits")
     for test in small_patch_amr(ds, _fields_grs, input_center="c", input_weight="ones"):
         test_grs.__name__ = test.description
         yield test
@@ -40,7 +40,7 @@
 @requires_ds(vf)
 def test_velocity_field():
     ds = data_dir_load(vf, cls=FITSDataset)
-    yield assert_equal, str(ds), "velocity_field_20.fits"
+    assert_equal(str(ds), "velocity_field_20.fits")
     for test in small_patch_amr(ds, _fields_vels, input_center="c", input_weight="ones"):
         test_velocity_field.__name__ = test.description
         yield test

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -33,7 +33,7 @@
 @requires_ds(sloshing, big_data=True)
 def test_sloshing():
     ds = data_dir_load(sloshing)
-    yield assert_equal, str(ds), "sloshing_low_res_hdf5_plt_cnt_0300"
+    assert_equal(str(ds), "sloshing_low_res_hdf5_plt_cnt_0300")
     for test in small_patch_amr(ds, _fields):
         test_sloshing.__name__ = test.description
         yield test
@@ -44,7 +44,7 @@
 @requires_ds(wt)
 def test_wind_tunnel():
     ds = data_dir_load(wt)
-    yield assert_equal, str(ds), "windtunnel_4lev_hdf5_plt_cnt_0030"
+    assert_equal(str(ds), "windtunnel_4lev_hdf5_plt_cnt_0030")
     for test in small_patch_amr(ds, _fields_2d):
         test_wind_tunnel.__name__ = test.description
         yield test

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -69,7 +69,7 @@
 
     # Test that all subhalo particles are contained within
     # their parent group.
-    yield assert_equal, total_sub, total_int
+    assert_equal(total_sub, total_int)
 
 @requires_file(g298)
 def test_halo_masses():
@@ -85,7 +85,7 @@
         # Check that masses from halo containers are the same
         # as the array of all masses.  This will test getting
         # scalar fields for halos correctly.
-        yield assert_array_equal, ad[ptype, "particle_mass"], mass
+        assert_array_equal(ad[ptype, "particle_mass"], mass)
 
 # fof/subhalo catalog with no member ids in first file
 g56 = "gadget_halos/data/groups_056/fof_subhalo_tab_056.0.hdf5"

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/gamer/tests/test_outputs.py
--- a/yt/frontends/gamer/tests/test_outputs.py
+++ b/yt/frontends/gamer/tests/test_outputs.py
@@ -34,7 +34,7 @@
 @requires_ds(jet, big_data=True)
 def test_jet():
     ds = data_dir_load(jet, kwargs={"units_override":jet_units})
-    yield assert_equal, str(ds), "jet_000002"
+    assert_equal(str(ds), "jet_000002")
     for test in small_patch_amr(ds, _fields_jet):
         test_jet.__name__ = test.description
         yield test
@@ -46,7 +46,7 @@
 @requires_ds(psiDM, big_data=True)
 def test_psiDM():
     ds = data_dir_load(psiDM)
-    yield assert_equal, str(ds), "psiDM_000020"
+    assert_equal(str(ds), "psiDM_000020")
     for test in small_patch_amr(ds, _fields_psiDM):
         test_psiDM.__name__ = test.description
         yield test
@@ -58,7 +58,7 @@
 @requires_ds(plummer, big_data=True)
 def test_plummer():
     ds = data_dir_load(plummer)
-    yield assert_equal, str(ds), "plummer_000000"
+    assert_equal(str(ds), "plummer_000000")
     for test in small_patch_amr(ds, _fields_plummer):
         test_plummer.__name__ = test.description
         yield test

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/gdf/tests/test_outputs.py
--- a/yt/frontends/gdf/tests/test_outputs.py
+++ b/yt/frontends/gdf/tests/test_outputs.py
@@ -30,7 +30,7 @@
 @requires_ds(sedov)
 def test_sedov_tunnel():
     ds = data_dir_load(sedov)
-    yield assert_equal, str(ds), "sedov_tst_0004"
+    assert_equal(str(ds), "sedov_tst_0004")
     for test in small_patch_amr(ds, _fields):
         test_sedov_tunnel.__name__ = test.description
         yield test

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/moab/tests/test_c5.py
--- a/yt/frontends/moab/tests/test_c5.py
+++ b/yt/frontends/moab/tests/test_c5.py
@@ -35,26 +35,26 @@
 def test_cantor_5():
     np.random.seed(0x4d3d3d3)
     ds = data_dir_load(c5)
-    yield assert_equal, str(ds), "c5"
+    assert_equal(str(ds), "c5")
     dso = [ None, ("sphere", ("c", (0.1, 'unitary'))),
                   ("sphere", ("c", (0.2, 'unitary')))]
     dd = ds.all_data()
-    yield assert_almost_equal, ds.index.get_smallest_dx(), 0.00411522633744843, 10
-    yield assert_equal, dd["x"].shape[0], 63*63*63
-    yield assert_almost_equal, \
-        dd["cell_volume"].in_units("code_length**3").sum(dtype="float64").d, \
-        1.0, 10
+    assert_almost_equal(ds.index.get_smallest_dx(), 0.00411522633744843, 10)
+    assert_equal(dd["x"].shape[0], 63*63*63)
+    assert_almost_equal(
+        dd["cell_volume"].in_units("code_length**3").sum(dtype="float64").d,
+        1.0, 10)
     for offset_1 in [1e-9, 1e-4, 0.1]:
         for offset_2 in [1e-9, 1e-4, 0.1]:
             DLE = ds.domain_left_edge
             DRE = ds.domain_right_edge
             ray = ds.ray(DLE + offset_1 * DLE.uq,
                          DRE - offset_2 * DRE.uq)
-            yield assert_almost_equal, ray["dts"].sum(dtype="float64"), 1.0, 8
+            assert_almost_equal(ray["dts"].sum(dtype="float64"), 1.0, 8)
     for i, p1 in enumerate(np.random.random((5, 3))):
         for j, p2 in enumerate(np.random.random((5, 3))):
             ray = ds.ray(p1, p2)
-            yield assert_almost_equal, ray["dts"].sum(dtype="float64"), 1.0, 8
+            assert_almost_equal(ray["dts"].sum(dtype="float64"), 1.0, 8)
     for field in _fields:
         for dobj_name in dso:
             yield FieldValuesTest(c5, field, dobj_name)

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/open_pmd/tests/test_outputs.py
--- a/yt/frontends/open_pmd/tests/test_outputs.py
+++ b/yt/frontends/open_pmd/tests/test_outputs.py
@@ -64,14 +64,16 @@
     domain_width = [2.08e-05, 2.08e-05, 2.01e-05] * np.ones_like(ds.domain_left_edge)
 
     assert isinstance(ds, OpenPMDDataset)
-    yield assert_equal, str(ds), "data00000100.h5"
-    yield assert_equal, ds.dimensionality, 3
-    yield assert_equal, ds.particle_types_raw, ('io',)
+    assert_equal(str(ds), "data00000100.h5")
+    assert_equal(ds.dimensionality, 3)
+    assert_equal(ds.particle_types_raw, ('io',))
     assert "all" in ds.particle_unions
-    yield assert_array_equal, ds.field_list, field_list
-    yield assert_array_equal, ds.domain_dimensions, domain_dimensions
-    yield assert_almost_equal, ds.current_time, 3.28471214521e-14 * np.ones_like(ds.current_time)
-    yield assert_almost_equal, ds.domain_right_edge - ds.domain_left_edge, domain_width
+    assert_array_equal(ds.field_list, field_list)
+    assert_array_equal(ds.domain_dimensions, domain_dimensions)
+    assert_almost_equal(ds.current_time,
+                        3.28471214521e-14 * np.ones_like(ds.current_time))
+    assert_almost_equal(ds.domain_right_edge - ds.domain_left_edge,
+                        domain_width)
 
 
 @requires_file(twoD)
@@ -127,11 +129,13 @@
     domain_width = [3.06e-05, 2.01e-05, 1e+0] * np.ones_like(ds.domain_left_edge)
 
     assert isinstance(ds, OpenPMDDataset)
-    yield assert_equal, str(ds), "data00000100.h5"
-    yield assert_equal, ds.dimensionality, 2
-    yield assert_equal, ds.particle_types_raw, ('Hydrogen1+', 'electrons')
+    assert_equal(str(ds), "data00000100.h5")
+    assert_equal(ds.dimensionality, 2)
+    assert_equal(ds.particle_types_raw, ('Hydrogen1+', 'electrons'))
     assert "all" in ds.particle_unions
-    yield assert_array_equal, ds.field_list, field_list
-    yield assert_array_equal, ds.domain_dimensions, domain_dimensions
-    yield assert_almost_equal, ds.current_time, 3.29025596712e-14 * np.ones_like(ds.current_time)
-    yield assert_almost_equal, ds.domain_right_edge - ds.domain_left_edge, domain_width
+    assert_array_equal(ds.field_list, field_list)
+    assert_array_equal(ds.domain_dimensions, domain_dimensions)
+    assert_almost_equal(ds.current_time,
+                        3.29025596712e-14 * np.ones_like(ds.current_time))
+    assert_almost_equal(ds.domain_right_edge - ds.domain_left_edge,
+                        domain_width)

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/owls_subfind/tests/test_outputs.py
--- a/yt/frontends/owls_subfind/tests/test_outputs.py
+++ b/yt/frontends/owls_subfind/tests/test_outputs.py
@@ -34,7 +34,7 @@
 @requires_ds(g8)
 def test_fields_g8():
     ds = data_dir_load(g8)
-    yield assert_equal, str(ds), os.path.basename(g8)
+    assert_equal(str(ds), os.path.basename(g8))
     for field in _fields:
         yield FieldValuesTest(g8, field, particle_type=True)
 
@@ -42,7 +42,7 @@
 @requires_ds(g1)
 def test_fields_g1():
     ds = data_dir_load(g1)
-    yield assert_equal, str(ds), os.path.basename(g1)
+    assert_equal(str(ds), os.path.basename(g1))
     for field in _fields:
         yield FieldValuesTest(g1, field, particle_type=True)
 

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -35,7 +35,7 @@
 @requires_ds(output_00080)
 def test_output_00080():
     ds = data_dir_load(output_00080)
-    yield assert_equal, str(ds), "info_00080"
+    assert_equal(str(ds), "info_00080")
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
     for dobj_name in dso:
         for field in _fields:
@@ -48,7 +48,7 @@
         dobj = create_obj(ds, dobj_name)
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
-        yield assert_equal, s1, s2
+        assert_equal(s1, s2)
     assert_equal(ds.particle_type_counts, {'io': 1090895})
 
 @requires_file(output_00080)
@@ -67,7 +67,7 @@
     ds = yt.load(os.path.join(ramsesNonCosmo, 'info_00002.txt'))
 
     expected_raw_time = 0.0299468077820411 # in ramses unit
-    yield assert_equal, ds.current_time.value, expected_raw_time
+    assert_equal(ds.current_time.value, expected_raw_time)
 
     expected_time = 14087886140997.336 # in seconds
     assert_equal(ds.current_time.in_units('s').value, expected_time)

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/rockstar/tests/test_outputs.py
--- a/yt/frontends/rockstar/tests/test_outputs.py
+++ b/yt/frontends/rockstar/tests/test_outputs.py
@@ -31,7 +31,7 @@
 @requires_ds(r1)
 def test_fields_r1():
     ds = data_dir_load(r1)
-    yield assert_equal, str(ds), os.path.basename(r1)
+    assert_equal(str(ds), os.path.basename(r1))
     for field in _fields:
         yield FieldValuesTest(r1, field, particle_type=True)
 

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/sdf/tests/test_outputs.py
--- a/yt/frontends/sdf/tests/test_outputs.py
+++ b/yt/frontends/sdf/tests/test_outputs.py
@@ -41,7 +41,7 @@
     if not internet_on():
         return
     ds = SDFDataset(scivis_data)
-    yield assert_equal, str(ds), "ds14_scivis_0128_e4_dt04_1.0000"
+    assert_equal(str(ds), "ds14_scivis_0128_e4_dt04_1.0000")
     ad = ds.all_data()
     assert np.unique(ad['particle_position_x']).size > 1
     ProjectionPlot(ds, "z", _fields)

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/stream/tests/test_stream_amrgrids.py
--- a/yt/frontends/stream/tests/test_stream_amrgrids.py
+++ b/yt/frontends/stream/tests/test_stream_amrgrids.py
@@ -26,7 +26,7 @@
     def make_proj():
         p = ProjectionPlot(spf, 'x', ["density"], center='c', origin='native')
         return p
-    yield assert_raises, YTIntDomainOverflow, make_proj
+    assert_raises(YTIntDomainOverflow, make_proj)
 
 def test_refine_by():
     grid_data = []

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/stream/tests/test_stream_hexahedral.py
--- a/yt/frontends/stream/tests/test_stream_hexahedral.py
+++ b/yt/frontends/stream/tests/test_stream_hexahedral.py
@@ -36,8 +36,8 @@
     ds = load_hexahedral_mesh(data, conn, coords, bbox=bbox)
     dd = ds.all_data()
     #raise RuntimeError
-    yield assert_almost_equal, float(dd["cell_volume"].sum(dtype="float64")), 1.0
-    yield assert_equal, dd["ones"].size, Nx * Ny * Nz
+    assert_almost_equal(float(dd["cell_volume"].sum(dtype="float64")), 1.0)
+    assert_equal(dd["ones"].size, Nx * Ny * Nz)
     # Now we try it with a standard mesh
     cell_x = np.linspace(0.0, 1.0, Nx+1)
     cell_y = np.linspace(0.0, 1.0, Ny+1)
@@ -47,11 +47,11 @@
     bbox = np.array([ [0.0, 1.0], [0.0, 1.0], [0.0, 1.0] ])
     ds = load_hexahedral_mesh(data, conn, coords, bbox=bbox)
     dd = ds.all_data()
-    yield assert_almost_equal, float(dd["cell_volume"].sum(dtype="float64")), 1.0
-    yield assert_equal, dd["ones"].size, Nx * Ny * Nz
-    yield assert_almost_equal, dd["dx"].to_ndarray(), 1.0/Nx
-    yield assert_almost_equal, dd["dy"].to_ndarray(), 1.0/Ny
-    yield assert_almost_equal, dd["dz"].to_ndarray(), 1.0/Nz
+    assert_almost_equal(float(dd["cell_volume"].sum(dtype="float64")), 1.0)
+    assert_equal(dd["ones"].size, Nx * Ny * Nz)
+    assert_almost_equal(dd["dx"].to_ndarray(), 1.0/Nx)
+    assert_almost_equal(dd["dy"].to_ndarray(), 1.0/Ny)
+    assert_almost_equal(dd["dz"].to_ndarray(), 1.0/Nz)
 
     s = SlicePlot(ds, "x", "random_field")
     s._setup_plots()

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/stream/tests/test_stream_particles.py
--- a/yt/frontends/stream/tests/test_stream_particles.py
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -64,8 +64,8 @@
     number_of_particles1 = np.sum([grid.NumberOfParticles for grid in ug1.index.grids])
     number_of_particles2 = np.sum([grid.NumberOfParticles for grid in ug2.index.grids])
 
-    yield assert_equal, number_of_particles1, num_particles
-    yield assert_equal, number_of_particles1, number_of_particles2
+    assert_equal(number_of_particles1, num_particles)
+    assert_equal(number_of_particles1, number_of_particles2)
 
     # Check to make sure the fields have been defined correctly
 
@@ -87,7 +87,7 @@
 
     amr1 = refine_amr(ug1, rc, fo, 3)
     for field in sorted(ug1.field_list):
-        yield assert_equal, (field in amr1.field_list), True
+        assert_equal((field in amr1.field_list), True)
 
     grid_data = []
 
@@ -112,8 +112,8 @@
     number_of_particles1 = [grid.NumberOfParticles for grid in amr1.index.grids]
     number_of_particles2 = [grid.NumberOfParticles for grid in amr2.index.grids]
 
-    yield assert_equal, np.sum(number_of_particles1), num_particles
-    yield assert_equal, number_of_particles1, number_of_particles2
+    assert_equal(np.sum(number_of_particles1), num_particles)
+    assert_equal(number_of_particles1, number_of_particles2)
 
     assert amr1._get_field_info("all", "particle_position_x").particle_type
     assert amr1._get_field_info("all", "particle_position_y").particle_type

diff -r d7bb28884e714cd5a9ce03a833c6dc4d756963a1 -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 yt/frontends/tipsy/tests/test_outputs.py
--- a/yt/frontends/tipsy/tests/test_outputs.py
+++ b/yt/frontends/tipsy/tests/test_outputs.py
@@ -45,13 +45,13 @@
                   unit_base = {'length': (60.0, "Mpccm/h")},
                   n_ref = 64)
     ds = data_dir_load(pkdgrav, TipsyDataset, (), kwargs)
-    yield assert_equal, str(ds), "halo1e11_run1.00400"
+    assert_equal(str(ds), "halo1e11_run1.00400")
     dso = [ None, ("sphere", ("c", (0.3, 'unitary')))]
     dd = ds.all_data()
-    yield assert_equal, dd["Coordinates"].shape, (26847360, 3)
+    assert_equal(dd["Coordinates"].shape, (26847360, 3))
     tot = sum(dd[ptype,"Coordinates"].shape[0]
               for ptype in ds.particle_types if ptype != "all")
-    yield assert_equal, tot, 26847360
+    assert_equal(tot, 26847360)
     for dobj_name in dso:
         for field in _fields:
             for axis in [0, 1, 2]:
@@ -63,7 +63,7 @@
         dobj = create_obj(ds, dobj_name)
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
-        yield assert_equal, s1, s2
+        assert_equal(s1, s2)
 
 gasoline_dmonly = "agora_1e11.00400/agora_1e11.00400"
 @requires_ds(gasoline_dmonly, big_data = True, file_check = True)
@@ -76,13 +76,13 @@
                   unit_base = {'length': (60.0, "Mpccm/h")},
                   n_ref = 64)
     ds = data_dir_load(gasoline_dmonly, TipsyDataset, (), kwargs)
-    yield assert_equal, str(ds), "agora_1e11.00400"
+    assert_equal(str(ds), "agora_1e11.00400")
     dso = [ None, ("sphere", ("c", (0.3, 'unitary')))]
     dd = ds.all_data()
-    yield assert_equal, dd["Coordinates"].shape, (10550576, 3)
+    assert_equal(dd["Coordinates"].shape, (10550576, 3))
     tot = sum(dd[ptype,"Coordinates"].shape[0]
               for ptype in ds.particle_types if ptype != "all")
-    yield assert_equal, tot, 10550576
+    assert_equal(tot, 10550576)
     for dobj_name in dso:
         for field in _fields:
             for axis in [0, 1, 2]:
@@ -94,7 +94,7 @@
         dobj = create_obj(ds, dobj_name)
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
-        yield assert_equal, s1, s2
+        assert_equal(s1, s2)
 
 tg_fields = OrderedDict(
     [


https://bitbucket.org/yt_analysis/yt/commits/e96467e8da01/
Changeset:   e96467e8da01
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 20:39:54+00:00
Summary:     remove unnecessary AssertWrapper class
Affected #:  1 file
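
AssertWrapper existed only to attach a human-readable description to each
yielded assertion so nose could report it; with the yields gone it has no
remaining callers, hence the removal below. The before/after shape, taken
from this hunk:

    # before: wrapped so nose reports "<ds>_mask_test" per yielded case
    yield AssertWrapper("%s_mask_test" % str(ds), assert_equal, s1, s2)

    # after: the assertion runs inline; failures point at sph_answer itself
    assert_equal(s1, s2)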

diff -r c05e448dfa7c42b9decc395cabb8c7442a6a5d37 -r e96467e8da01e92743fbcf40af90f3593d414af9 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -992,21 +992,18 @@
 def sph_answer(ds, ds_str_repr, ds_nparticles, fields):
     if not can_run_ds(ds):
         return
-    yield AssertWrapper("%s_string_representation" % str(ds), assert_equal,
-                        str(ds), ds_str_repr)
+    assert_equal(str(ds), ds_str_repr)
     dso = [None, ("sphere", ("c", (0.1, 'unitary')))]
     dd = ds.all_data()
-    yield AssertWrapper("%s_all_data_part_shape" % str(ds), assert_equal,
-                        dd["particle_position"].shape, (ds_nparticles, 3))
+    assert_equal(dd["particle_position"].shape, (ds_nparticles, 3))
     tot = sum(dd[ptype, "particle_position"].shape[0]
               for ptype in ds.particle_types if ptype != "all")
-    yield AssertWrapper("%s_all_data_part_total" % str(ds), assert_equal,
-                        tot, ds_nparticles)
+    assert_equal(tot, ds_nparticles)
     for dobj_name in dso:
         dobj = create_obj(ds, dobj_name)
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
-        yield AssertWrapper("%s_mask_test" % str(ds), assert_equal, s1, s2)
+        assert_equal(s1, s2)
         for field, weight_field in fields.items():
             if field[0] in ds.particle_types:
                 particle_type = True
@@ -1028,17 +1025,3 @@
     cls = getattr(ds, obj_type[0])
     obj = cls(*obj_type[1])
     return obj
-
-class AssertWrapper(object):
-    """
-    Used to wrap a numpy testing assertion, in order to provide a useful name
-    for a given assertion test.
-    """
-    def __init__(self, description, *args):
-        # The key here is to add a description attribute, which nose will pick
-        # up.
-        self.args = args
-        self.description = description
-
-    def __call__(self):
-        self.args[0](*self.args[1:])


https://bitbucket.org/yt_analysis/yt/commits/7de56413f26a/
Changeset:   7de56413f26a
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 21:04:24+00:00
Summary:     fix yield-less usage of units_override_check
Affected #:  12 files
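
With units_override_check itself now yield-less (its assertions run
internally), iterating over its return value would fail with a TypeError, so
each frontend test reduces to a bare call. Representative before/after,
matching the hunks below:

    # before: assumed the helper yielded a sequence of tests
    def test_units_override():
        for test in units_override_check(d9p):
            yield test

    # after: the helper asserts internally, so one call suffices
    def test_units_override():
        units_override_check(d9p)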

diff -r e96467e8da01e92743fbcf40af90f3593d414af9 -r 7de56413f26af88f8d5d9d3c9efceefc99596afc yt/frontends/art/tests/test_outputs.py
--- a/yt/frontends/art/tests/test_outputs.py
+++ b/yt/frontends/art/tests/test_outputs.py
@@ -116,5 +116,4 @@
 
 @requires_file(d9p)
 def test_units_override():
-    for test in units_override_check(d9p):
-        yield test
+    units_override_check(d9p)

diff -r e96467e8da01e92743fbcf40af90f3593d414af9 -r 7de56413f26af88f8d5d9d3c9efceefc99596afc yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -57,5 +57,4 @@
 
 @requires_file(sizmbhloz)
 def test_units_override():
-    for test in units_override_check(sizmbhloz):
-        yield test
+    units_override_check(sizmbhloz)

diff -r e96467e8da01e92743fbcf40af90f3593d414af9 -r 7de56413f26af88f8d5d9d3c9efceefc99596afc yt/frontends/athena_pp/tests/test_outputs.py
--- a/yt/frontends/athena_pp/tests/test_outputs.py
+++ b/yt/frontends/athena_pp/tests/test_outputs.py
@@ -71,8 +71,7 @@
 
 @requires_file(AM06)
 def test_units_override():
-    for test in units_override_check(AM06):
-        yield test
+    units_override_check(AM06)
 
 @requires_file(AM06)
 def test_AthenaPPDataset():

diff -r e96467e8da01e92743fbcf40af90f3593d414af9 -r 7de56413f26af88f8d5d9d3c9efceefc99596afc yt/frontends/boxlib/tests/test_outputs.py
--- a/yt/frontends/boxlib/tests/test_outputs.py
+++ b/yt/frontends/boxlib/tests/test_outputs.py
@@ -223,5 +223,4 @@
 
 @requires_file(rt)
 def test_units_override():
-    for test in units_override_check(rt):
-        yield test
+    units_override_check(rt)

diff -r e96467e8da01e92743fbcf40af90f3593d414af9 -r 7de56413f26af88f8d5d9d3c9efceefc99596afc yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -92,15 +92,12 @@
 
 @requires_file(zp)
 def test_units_override_zp():
-    for test in units_override_check(zp):
-        yield test
+    units_override_check(zp)
 
 @requires_file(gc)
 def test_units_override_gc():
-    for test in units_override_check(gc):
-        yield test
+    units_override_check(gc)
 
 @requires_file(kho)
 def test_units_override_kho():
-    for test in units_override_check(kho):
-        yield test
+    units_override_check(kho)

diff -r e96467e8da01e92743fbcf40af90f3593d414af9 -r 7de56413f26af88f8d5d9d3c9efceefc99596afc yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -122,8 +122,7 @@
 
 @requires_file(enzotiny)
 def test_units_override():
-    for test in units_override_check(enzotiny):
-        yield test
+    units_override_check(enzotiny)
 
 @requires_ds(ecp, big_data=True)
 def test_nuclei_density_fields():

diff -r e96467e8da01e92743fbcf40af90f3593d414af9 -r 7de56413f26af88f8d5d9d3c9efceefc99596afc yt/frontends/fits/tests/test_outputs.py
--- a/yt/frontends/fits/tests/test_outputs.py
+++ b/yt/frontends/fits/tests/test_outputs.py
@@ -47,8 +47,7 @@
 
 @requires_file(vf)
 def test_units_override():
-    for test in units_override_check(vf):
-        yield test
+    units_override_check(vf)
 
 @requires_file(grs)
 def test_FITSDataset():

diff -r e96467e8da01e92743fbcf40af90f3593d414af9 -r 7de56413f26af88f8d5d9d3c9efceefc99596afc yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -55,8 +55,7 @@
 
 @requires_file(sloshing)
 def test_units_override():
-    for test in units_override_check(sloshing):
-        yield test
+    units_override_check(sloshing)
 
 fid_1to3_b1 = "fiducial_1to3_b1/fiducial_1to3_b1_hdf5_part_0080"
 

diff -r e96467e8da01e92743fbcf40af90f3593d414af9 -r 7de56413f26af88f8d5d9d3c9efceefc99596afc yt/frontends/gamer/tests/test_outputs.py
--- a/yt/frontends/gamer/tests/test_outputs.py
+++ b/yt/frontends/gamer/tests/test_outputs.py
@@ -71,5 +71,4 @@
 
 @requires_file(jet)
 def test_units_override():
-    for test in units_override_check(jet):
-        yield test
+    units_override_check(jet)

diff -r e96467e8da01e92743fbcf40af90f3593d414af9 -r 7de56413f26af88f8d5d9d3c9efceefc99596afc yt/frontends/gdf/tests/test_outputs.py
--- a/yt/frontends/gdf/tests/test_outputs.py
+++ b/yt/frontends/gdf/tests/test_outputs.py
@@ -43,5 +43,4 @@
 
 @requires_file(sedov)
 def test_units_override():
-    for test in units_override_check(sedov):
-        yield test
+    units_override_check(sedov)

diff -r e96467e8da01e92743fbcf40af90f3593d414af9 -r 7de56413f26af88f8d5d9d3c9efceefc99596afc yt/frontends/moab/tests/test_c5.py
--- a/yt/frontends/moab/tests/test_c5.py
+++ b/yt/frontends/moab/tests/test_c5.py
@@ -66,5 +66,4 @@
 
 @requires_file(c5)
 def test_units_override():
-    for test in units_override_check(c5):
-        yield test
+    units_override_check(c5)

diff -r e96467e8da01e92743fbcf40af90f3593d414af9 -r 7de56413f26af88f8d5d9d3c9efceefc99596afc yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -57,8 +57,7 @@
 
 @requires_file(output_00080)
 def test_units_override():
-    for test in units_override_check(output_00080):
-        yield test
+    units_override_check(output_00080)
 
 
 ramsesNonCosmo = 'DICEGalaxyDisk_nonCosmological/output_00002'

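The frontend test modules above all receive the same mechanical conversion. A minimal editorial sketch of the before/after pattern, using a toy check function rather than yt's actual units_override_check:

    from numpy.testing import assert_equal

    def units_check(value):
        # the helper now runs its assertions eagerly
        assert_equal(value, value)

    def test_units_override():
        # replaces the nose generator form:
        #     for test in units_check(42):
        #         yield test
        units_check(42)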

https://bitbucket.org/yt_analysis/yt/commits/96c70d0b4c8f/
Changeset:   96c70d0b4c8f
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 21:05:43+00:00
Summary:     avoid warnings from analysis modules
Affected #:  3 files

diff -r 7de56413f26af88f8d5d9d3c9efceefc99596afc -r 96c70d0b4c8ff66616010da70449ff64256db3f2 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -52,6 +52,7 @@
                  validators=[ValidateSpatial(0)],
                  take_log=False,
                  display_field=False,
+                 sampling_type='cell',
                  units='')
 
 class Clump(TreeContainer):

diff -r 7de56413f26af88f8d5d9d3c9efceefc99596afc -r 96c70d0b4c8ff66616010da70449ff64256db3f2 yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -10,9 +10,13 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.analysis_modules.photon_simulator.api import \
-    XSpecThermalModel, XSpecAbsorbModel, \
-    ThermalPhotonModel, PhotonList
+import warnings
+
+with warnings.catch_warnings():
+    warnings.simplefilter("ignore")
+    from yt.analysis_modules.photon_simulator.api import \
+        XSpecThermalModel, XSpecAbsorbModel, \
+        ThermalPhotonModel, PhotonList
 from yt.config import ytcfg
 from yt.testing import requires_file, requires_module
 import numpy as np

diff -r 7de56413f26af88f8d5d9d3c9efceefc99596afc -r 96c70d0b4c8ff66616010da70449ff64256db3f2 yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -172,10 +172,12 @@
         self.current_v = 0.0
 
         _vlos = create_vlos(normal, self.no_shifting)
-        self.ds.add_field(("gas","v_los"), function=_vlos, units="cm/s")
+        self.ds.add_field(("gas","v_los"), function=_vlos, units="cm/s",
+                          sampling_type='cell')
 
         _intensity = self._create_intensity()
-        self.ds.add_field(("gas","intensity"), function=_intensity, units=self.field_units)
+        self.ds.add_field(("gas","intensity"), function=_intensity,
+                          units=self.field_units, sampling_type='cell')
 
         if method == "integrate" and weight_field is None:
             self.proj_units = str(ds.quan(1.0, self.field_units+"*cm").units)


https://bitbucket.org/yt_analysis/yt/commits/09a70759ef59/
Changeset:   09a70759ef59
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 21:05:53+00:00
Summary:     print the warning about sampling_type being unset at the proper stack level
Affected #:  1 file

diff -r 96c70d0b4c8ff66616010da70449ff64256db3f2 -r 09a70759ef5995a707f3bd2dc3096cb3807c1e92 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -1187,7 +1187,7 @@
                 sampling_type = "particle"
         if sampling_type is None:
             warnings.warn("Because 'sampling_type' not specified, yt will "
-                          "assume a cell 'sampling_type'")
+                          "assume a cell 'sampling_type'", stacklevel=2)
             sampling_type = "cell"
         self.field_info.add_field(name, sampling_type, function=function, **kwargs)
         self.field_info._show_field_errors.append(name)

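A self-contained sketch (toy function, not yt's actual Dataset.add_field) of what the stacklevel change buys: the warning is attributed to the caller's line instead of to yt's internals.

    import warnings

    def add_field(name, sampling_type=None):
        if sampling_type is None:
            # stacklevel=2 points the warning at the add_field() call site
            warnings.warn("Because 'sampling_type' not specified, yt will "
                          "assume a cell 'sampling_type'", stacklevel=2)
            sampling_type = "cell"
        return name, sampling_type

    add_field(("gas", "demo"))  # the emitted warning now cites this line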

https://bitbucket.org/yt_analysis/yt/commits/57321497fa2e/
Changeset:   57321497fa2e
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 21:36:37+00:00
Summary:     add missing sampling_type kwargs to kill warnings in field tests
Affected #:  2 files

diff -r 09a70759ef5995a707f3bd2dc3096cb3807c1e92 -r 57321497fa2e04edd3d010ffebf6151aeb566ee8 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -238,15 +238,23 @@
     def unitless_data(field, data):
             return np.ones(data['density'].shape)
 
-    ds.add_field(('gas','density_alias_no_units'), function=density_alias)
-    ds.add_field(('gas','density_alias_auto'), function=density_alias,
-                 units='auto', dimensions='density')
-    ds.add_field(('gas','density_alias_wrong_units'), function=density_alias,
+    ds.add_field(('gas','density_alias_no_units'), sampling_type='cell',
+                 function=density_alias)
+    ds.add_field(('gas','density_alias_auto'), sampling_type='cell',
+                 function=density_alias, units='auto', dimensions='density')
+    ds.add_field(('gas','density_alias_wrong_units'),
+                 function=density_alias,
+                 sampling_type='cell',
                  units='m/s')
-    ds.add_field(('gas','density_alias_unparseable_units'), function=density_alias,
+    ds.add_field(('gas','density_alias_unparseable_units'),
+                 sampling_type='cell',
+                 function=density_alias,
                  units='dragons')
-    ds.add_field(('gas','density_alias_auto_wrong_dims'), function=density_alias,
-                 units='auto', dimensions="temperature")
+    ds.add_field(('gas','density_alias_auto_wrong_dims'),
+                 function=density_alias,
+                 sampling_type='cell',
+                 units='auto',
+                 dimensions="temperature")
     assert_raises(YTFieldUnitError, get_data, ds, 'density_alias_no_units')
     assert_raises(YTFieldUnitError, get_data, ds, 'density_alias_wrong_units')
     assert_raises(YTFieldUnitParseError, get_data, ds,
@@ -256,11 +264,22 @@
     dens = ad['density_alias_auto']
     assert_equal(str(dens.units), 'g/cm**3')
 
-    ds.add_field(('gas','dimensionless'), function=unitless_data)
-    ds.add_field(('gas','dimensionless_auto'), function=unitless_data,
-                 units='auto', dimensions='dimensionless')
-    ds.add_field(('gas','dimensionless_explicit'), function=unitless_data, units='')
-    ds.add_field(('gas','dimensionful'), function=unitless_data, units='g/cm**3')
+    ds.add_field(('gas','dimensionless'),
+                 sampling_type='cell',
+                 function=unitless_data)
+    ds.add_field(('gas','dimensionless_auto'),
+                 function=unitless_data,
+                 sampling_type='cell',
+                 units='auto',
+                 dimensions='dimensionless')
+    ds.add_field(('gas','dimensionless_explicit'),
+                 function=unitless_data,
+                 sampling_type='cell',
+                 units='')
+    ds.add_field(('gas','dimensionful'),
+                 sampling_type='cell',
+                 function=unitless_data,
+                 units='g/cm**3')
 
     assert_equal(str(ad['dimensionless'].units), 'dimensionless')
     assert_equal(str(ad['dimensionless_auto'].units), 'dimensionless')
@@ -281,7 +300,8 @@
     def density_alias(field, data):
         return data['density']
 
-    ds.add_field('density_alias', function=density_alias, units='g/cm**3')
+    ds.add_field('density_alias', sampling_type='cell',
+                 function=density_alias, units='g/cm**3')
 
     ad['density_alias']
     assert ds.derived_field_list[0] == 'density_alias'
@@ -292,7 +312,8 @@
     def density_alias(field, data):
         return data['density']
 
-    ds.add_field('density_alias', function=density_alias, units='g/cm**3')
+    ds.add_field('density_alias', sampling_type='cell',
+                 function=density_alias, units='g/cm**3')
 
     ds.field_info['density_alias']
     ds.field_info['gas', 'density_alias']
@@ -302,8 +323,9 @@
     def pmass_alias(field, data):
         return data['particle_mass']
         
-    ds.add_field('particle_mass_alias', function=pmass_alias, 
-                 units='g', particle_type=True)
+    ds.add_field('particle_mass_alias', function=pmass_alias,
+                 sampling_type='particle',
+                 units='g')
 
     ds.field_info['particle_mass_alias']
     ds.field_info['all', 'particle_mass_alias']

diff -r 09a70759ef5995a707f3bd2dc3096cb3807c1e92 -r 57321497fa2e04edd3d010ffebf6151aeb566ee8 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -15,7 +15,6 @@
 
 
 import numpy as np
-import time
 
 from yt.frontends.stream.data_structures import load_particles
 from yt.geometry.oct_container import \


https://bitbucket.org/yt_analysis/yt/commits/2d8d1f3cced8/
Changeset:   2d8d1f3cced8
Branch:      yt
User:        ngoldbaum
Date:        2017-03-28 23:16:26+00:00
Summary:     use sampling_type instead of particle_type in FieldInfoContainer initializer
Affected #:  1 file

diff -r 57321497fa2e04edd3d010ffebf6151aeb566ee8 -r 2d8d1f3cced8594f778622e7aee40a4497475988 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -257,7 +257,7 @@
             self[name] = DerivedField(name, sampling_type, function, **kwargs)
             return
 
-        if kwargs.get("particle_type", False):
+        if sampling_type == 'particle':
             ftype = 'all'
         else:
             ftype = self.ds.default_fluid_type

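For reference, the calling convention this changeset standardizes on, sketched against yt's fake_random_ds testing helper (the alias field itself is illustrative):

    from yt.testing import fake_random_ds

    ds = fake_random_ds(16, particles=100)

    def _pmass_alias(field, data):
        return data['particle_mass']

    # previously spelled with particle_type=True
    ds.add_field('particle_mass_alias', function=_pmass_alias,
                 units='g', sampling_type='particle')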

https://bitbucket.org/yt_analysis/yt/commits/90eaae3b17e8/
Changeset:   90eaae3b17e8
Branch:      yt
User:        ngoldbaum
Date:        2017-04-14 15:38:05+00:00
Summary:     merging
Affected #:  50 files

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 doc/source/help/index.rst
--- a/doc/source/help/index.rst
+++ b/doc/source/help/index.rst
@@ -72,8 +72,8 @@
 
   $ yt --help
 
-If you continue to see errors, you should try contacting us via IRC or email
-but you may have to reinstall yt (see :ref:`getting-and-installing-yt`).
+If you continue to see errors, you should try contacting us via Slack, IRC or
+email but you may have to reinstall yt (see :ref:`getting-and-installing-yt`).
 
 .. _search-the-documentation:
 
@@ -170,17 +170,24 @@
 
 .. _irc:
 
-Go on IRC to ask a question
----------------------------
+Go on Slack or IRC to ask a question
+------------------------------------
+
+If you want a fast, interactive experience, you could try jumping into our Slack
+or IRC channels to get your questions answered in a chatroom style environment.
 
-If you want a fast, interactive experience, you could try jumping into our IRC
-channel to get your questions answered in a chatroom style environment.  You
-don't even need to have any special IRC client in order to join.  We are the
-#yt channel on irc.freenode.net, but you can also connect using your web
-browser by going to http://yt-project.org/irc.html .  There are usually 2-8
-members of the user base and development team online, so you'll probably get
-your answers quickly.  Remember to bring the information from the
-:ref:`last step <isolate_and_document>`.
+To join our Slack channel you will need to request an invite by going to
+http://yt-project.org/development.html, clicking the "Join as @ Slack!" button,
+and filling out the form. You will get an invite as soon as an administrator
+approves your request.
+
+Alternatively you can go to our IRC channel, which does not require an
+invite. You don't even need a special IRC client in order to join.  We are the
+#yt channel on irc.freenode.net, but you can also
+connect using your web browser by going to http://yt-project.org/irc.html .
+There are usually 2-8 members of the user base and development team online, so
+you'll probably get your answers quickly.  Remember to bring the information
+from the :ref:`last step <isolate_and_document>`.
 
 .. _mailing-list:
 

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -416,8 +416,19 @@
 
 .. _source-installation:
 
-Installing yt Using ``pip`` or From Source
-++++++++++++++++++++++++++++++++++++++++++
+Installing yt Using ``pip``
++++++++++++++++++++++++++++
+
+If you already have a python installation that you manage using ``pip`` you can
+install the latest release of yt by doing::
+
+  $ pip install yt
+
+If you do not have root access you may need to append ``--user`` to install to a
+location in your home folder.
+
+Installing yt from source
++++++++++++++++++++++++++
 
 .. note::
 
@@ -432,28 +443,20 @@
 - A C compiler such as ``gcc`` or ``clang``
 - ``Python 2.7``, ``Python 3.4``, or ``Python 3.5``
 
-In addition, building yt from source requires several python packages
+In addition, building yt from source requires ``numpy`` and ``cython``
 which can be installed with ``pip``:
 
 .. code-block:: bash
 
-  $ pip install numpy matplotlib cython sympy
+  $ pip install numpy cython
 
 You may also want to install some of yt's optional dependencies, including
 ``jupyter``, ``h5py`` (which in turn depends on the HDF5 library), ``scipy``, or
 ``astropy``,
 
-From here, you can use ``pip`` (which comes with ``Python``) to install the
-latest stable version of yt:
-
-.. code-block:: bash
-
-  $ pip install yt
-
-The source code for yt may be found at the Bitbucket project site and can also
-be utilized for installation. If you prefer to install the development version
-of yt instead of the latest stable release, you will need ``mercurial`` to clone
-the official repo:
+The source code for yt may be found on Bitbucket. If you prefer to install the
+development version of yt instead of the latest stable release, you will need
+``mercurial`` to clone the official repo:
 
 .. code-block:: bash
 
@@ -476,10 +479,10 @@
 ``$HOME/Library/Python/2.7/lib/python/site-packages/`` on OSX) Please refer to
 the ``setuptools`` documentation for the additional options.
 
-If you are unable to locate the ``yt`` executable (i.e. ``yt version`` failes),
-then you likely need to add the ``$HOME/.local/bin`` (or the equivalent on your
-OS) to your PATH. Some linux distributions do not include this directory in the
-default search path.
+If you are unable to locate the ``yt`` executable (i.e. executing ``yt version``
+at the bash command line fails), then you likely need to add the
+``$HOME/.local/bin`` (or the equivalent on your OS) to your PATH. Some linux
+distributions do not include this directory in the default search path.
 
 If you choose this installation method, you do not need to run any activation
 script since this will install yt into your global python environment.
@@ -494,9 +497,9 @@
   $ hg update yt
   $ python setup.py develop --user --prefix=
 
-As above, you can leave off ``--user --prefix=`` if you want to install yt into the default
-package install path.  If you do not have write access for this location, you
-might need to use ``sudo``.
+As above, you can leave off ``--user --prefix=`` if you want to install yt into
+the default package install path.  If you do not have write access for this
+location, you might need to use ``sudo``.
 
 Build errors with ``setuptools`` or ``distribute``
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 doc/source/quickstart/6)_Volume_Rendering.ipynb
--- a/doc/source/quickstart/6)_Volume_Rendering.ipynb
+++ b/doc/source/quickstart/6)_Volume_Rendering.ipynb
@@ -93,12 +93,12 @@
     "\n",
     "source = sc.sources['source_00']\n",
     "\n",
-    "source.set_fields('density', no_ghost=False)\n",
+    "source.field = 'density'\n",
     "\n",
     "tf = yt.ColorTransferFunction((-28, -25))\n",
     "tf.add_layers(4, w=0.03)\n",
     "\n",
-    "source.set_transfer_function(tf)\n",
+    "source.transfer_function = tf\n",
     "\n",
     "sc.show(sigma_clip=4.0)"
    ]

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -874,6 +874,7 @@
    ~yt.utilities.cosmology.Cosmology.expansion_factor
    ~yt.utilities.cosmology.Cosmology.z_from_t
    ~yt.utilities.cosmology.Cosmology.t_from_z
+   ~yt.utilities.cosmology.Cosmology.get_dark_factor
 
 Testing Infrastructure
 ----------------------

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 setup.py
--- a/setup.py
+++ b/setup.py
@@ -92,10 +92,6 @@
               ["yt/geometry/fake_octree.pyx"],
               include_dirs=["yt/utilities/lib/"],
               libraries=std_libs),
-    Extension("yt.utilities.spatial.ckdtree",
-              ["yt/utilities/spatial/ckdtree.pyx"],
-              include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs),
     Extension("yt.utilities.lib.autogenerated_element_samplers",
               ["yt/utilities/lib/autogenerated_element_samplers.pyx"],
               include_dirs=["yt/utilities/lib/"]),
@@ -208,8 +204,6 @@
                             "yt/geometry/",
                             "yt/utilities/lib/"],
               depends=glob.glob("yt/frontends/artio/artio_headers/*.c")),
-    Extension("yt.utilities.spatial._distance_wrap",
-              glob.glob("yt/utilities/spatial/src/*.c")),
 ]
 
 # EMBREE
@@ -391,13 +385,3 @@
     scripts=["scripts/iyt"],
     ext_modules=cython_extensions + extensions,
 )
-
-# This info about 'ckdtree' should be incorporated somehow...
-#    setup(maintainer="SciPy Developers",
-#          author="Anne Archibald",
-#          maintainer_email="scipy-dev at scipy.org",
-#          description="Spatial algorithms and data structures",
-#          url="http://www.scipy.org",
-#          license="SciPy License (BSD Style)",
-#          **configuration(top_path='').todict()
-#   )

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -17,7 +17,7 @@
   local_fits_001:
     - yt/frontends/fits/tests/test_outputs.py
 
-  local_flash_004:
+  local_flash_005:
     - yt/frontends/flash/tests/test_outputs.py
 
   local_gadget_001:
@@ -42,7 +42,7 @@
   local_owls_001:
     - yt/frontends/owls/tests/test_outputs.py
 
-  local_pw_012:
+  local_pw_013:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -20,6 +20,7 @@
 from yt.config import \
     ytcfg
 from yt.funcs import \
+    ensure_dir, \
     mylog, \
     only_on_root
 from yt.analysis_modules.cosmological_observation.cosmology_splice import \
@@ -112,8 +113,7 @@
         self.output_prefix = output_prefix
 
         # Create output directory.
-        if not os.path.exists(self.output_dir):
-            only_on_root(os.mkdir, self.output_dir)
+        ensure_dir(self.output_dir)
 
         # Calculate light cone solution.
         CosmologySplice.__init__(self, parameter_filename, simulation_type,

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -478,10 +478,12 @@
                         field_types=ftypes, extra_attrs=extra_attrs)
 
     def add_default_quantities(self, field_type='halos'):
-        self.add_quantity("particle_identifier", field_type=field_type,prepend=True)
-        self.add_quantity("particle_mass", field_type=field_type,prepend=True)
-        self.add_quantity("particle_position_x", field_type=field_type,prepend=True)
-        self.add_quantity("particle_position_y", field_type=field_type,prepend=True)
-        self.add_quantity("particle_position_z", field_type=field_type,prepend=True)
-        self.add_quantity("virial_radius", field_type=field_type,prepend=True)
-
+        for field in ["particle_identifier", "particle_mass",
+                      "particle_position_x", "particle_position_y",
+                      "particle_position_z", "virial_radius"]:
+            field_name = (field_type, field)
+            if field_name not in self.halos_ds.field_list:
+                mylog.warn("Halo dataset %s has no field %s." %
+                           (self.halos_ds, str(field_name)))
+                continue
+            self.add_quantity(field, field_type=field_type, prepend=True)

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/analysis_modules/halo_analysis/halo_filters.py
--- a/yt/analysis_modules/halo_analysis/halo_filters.py
+++ b/yt/analysis_modules/halo_analysis/halo_filters.py
@@ -16,8 +16,9 @@
 import numpy as np
 
 from yt.utilities.operator_registry import \
-     OperatorRegistry
-from yt.utilities.spatial import KDTree
+    OperatorRegistry
+from yt.utilities.on_demand_imports import \
+    _scipy as scipy
 
 from .halo_callbacks import HaloCallback
 
@@ -95,7 +96,8 @@
     rad = data_source[ptype, "virial_radius"].in_units("Mpc").to_ndarray()
     ids = data_source[ptype, "particle_identifier"].to_ndarray().astype("int")
     parents = -1 * np.ones_like(ids, dtype="int")
-    my_tree = KDTree(pos)
+    boxsize = data_source.ds.domain_width.in_units('Mpc')
+    my_tree = scipy.spatial.cKDTree(pos, boxsize=boxsize)
 
     for i in range(ids.size):
         neighbors = np.array(

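A minimal sketch of the SciPy call that replaces the vendored yt.utilities.spatial.KDTree above; cKDTree's boxsize argument makes the neighbor queries periodic (toy data, not from the commit):

    import numpy as np
    from scipy.spatial import cKDTree

    pos = np.random.uniform(0.0, 100.0, size=(1000, 3))  # positions in Mpc
    tree = cKDTree(pos, boxsize=100.0)  # periodic in all three dimensions
    neighbors = tree.query_ball_point(pos[0], r=1.0)  # indices within 1 Mpc
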
diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -75,7 +75,10 @@
 CONFIG_DIR = os.environ.get(
     'XDG_CONFIG_HOME', os.path.join(os.path.expanduser('~'), '.config', 'yt'))
 if not os.path.exists(CONFIG_DIR):
-    os.makedirs(CONFIG_DIR)
+    try: 
+        os.makedirs(CONFIG_DIR)
+    except OSError:
+        warnings.warn("unable to create yt config directory")
 
 CURRENT_CONFIG_FILE = os.path.join(CONFIG_DIR, 'ytrc')
 _OLD_CONFIG_FILE = os.path.join(os.path.expanduser('~'), '.yt', 'config')
@@ -116,8 +119,11 @@
 if not os.path.exists(CURRENT_CONFIG_FILE):
     cp = configparser.ConfigParser()
     cp.add_section("yt")
-    with open(CURRENT_CONFIG_FILE, 'w') as new_cfg:
-        cp.write(new_cfg)
+    try:
+        with open(CURRENT_CONFIG_FILE, 'w') as new_cfg:
+            cp.write(new_cfg)
+    except IOError:
+        warnings.warn("unable to write new config file")
 
 class YTConfigParser(configparser.ConfigParser, object):
     def __setitem__(self, key, val):

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -630,9 +630,23 @@
         if len(part) > 0: self._fill_particles(part)
         if len(fill) > 0: self._fill_fields(fill)
         for a, f in sorted(alias.items()):
-            self[a] = f(self)
+            if f.particle_type:
+                self[a] = self._data_source[f]
+            else:
+                self[a] = f(self)
             self.field_data[a].convert_to_units(f.output_units)
-        if len(gen) > 0: self._generate_fields(gen)
+        if len(gen) > 0:
+            part_gen = []
+            cell_gen = []
+            for field in gen:
+                finfo = self.ds.field_info[field]
+                if finfo.particle_type:
+                    part_gen.append(field)
+                else:
+                    cell_gen.append(field)
+            self._generate_fields(cell_gen)
+            for p in part_gen:
+                self[p] = self._data_source[p]
 
     def _split_fields(self, fields_to_get):
         fill, gen = self.index._split_fields(fields_to_get)

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -71,7 +71,7 @@
 
     def wrap_func(self, field_name, old_fi):
         new_fi = copy.copy(old_fi)
-        new_fi.name = (self.filtered_type, field_name[1])
+        new_fi.name = (self.name, field_name[1])
         if old_fi._function == NullFunc:
             new_fi._function = TranslationFunc(old_fi.name)
         return new_fi

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -708,7 +708,7 @@
                  self.GridDimensions,
                  cell_size)
 
-            locs = storage.values[:, :, fi] > 0.0
+            locs = storage.values[:, :, fi] != 0.0
             storage.used[locs] = True
 
             if self.weight_field is not None:

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -964,10 +964,18 @@
 
         if getattr(self, "cosmological_simulation", False):
             # this dataset is cosmological, add a cosmology object
+
+            # Set dynamical dark energy parameters
+            use_dark_factor = getattr(self, 'use_dark_factor', False)
+            w_0 = getattr(self, 'w_0', -1.0)
+            w_a = getattr(self, 'w_a', 0.0)
+
             self.cosmology = \
                     Cosmology(hubble_constant=self.hubble_constant,
                               omega_matter=self.omega_matter,
-                              omega_lambda=self.omega_lambda)
+                              omega_lambda=self.omega_lambda,
+                              use_dark_factor = use_dark_factor,
+                              w_0 = w_0, w_a = w_a)
             self.critical_density = \
                     self.cosmology.critical_density(self.current_redshift)
             self.scale_factor = 1.0 / (1.0 + self.current_redshift)

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/data_objects/tests/test_particle_filter.py
--- a/yt/data_objects/tests/test_particle_filter.py
+++ b/yt/data_objects/tests/test_particle_filter.py
@@ -1,6 +1,8 @@
 from __future__ import print_function
 import yt
-from yt.testing import requires_file
+from yt.testing import \
+    assert_equal, \
+    requires_file
 from yt.data_objects.particle_filters import \
     add_particle_filter, particle_filter
 
@@ -45,3 +47,22 @@
     ad = ds.all_data()
     ad['deposit', 'stars_cic']
     assert True
+
+@requires_file(iso_galaxy)
+def test_covering_grid_particle_filter():
+    @particle_filter(requires=["particle_type"], filtered_type='all')
+    def stars(pfilter, data):
+        filter = data[(pfilter.filtered_type, "particle_type")] == 2
+        return filter
+
+    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+    ds.add_particle_filter('stars')
+
+    for grid in ds.index.grids[20:31]:
+        cg = ds.covering_grid(grid.Level, grid.LeftEdge, grid.ActiveDimensions)
+        
+        assert_equal(cg['stars', 'particle_ones'].shape[0],
+                     grid['stars', 'particle_ones'].shape[0])
+        assert_equal(cg['stars', 'particle_mass'].shape[0],
+                     grid['stars', 'particle_mass'].shape[0])

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -1,3 +1,4 @@
+import yt
 import numpy as np
 
 from yt.data_objects.profiles import \
@@ -194,3 +195,28 @@
     assert_raises(
         YTIllDefinedProfile, PhasePlot, ad, 'particle_radius', 'particle_mass',
         'particle_ones')
+
+def test_particle_profile_negative_field():
+    # see Issue #1340
+    n_particles = int(1e4)
+
+    ppx, ppy, ppz = np.random.normal(size=[3, n_particles])
+    pvx, pvy, pvz = - np.ones((3, n_particles))
+
+    data = {'particle_position_x': ppx,
+            'particle_position_y': ppy,
+            'particle_position_z': ppz,
+            'particle_velocity_x': pvx,
+            'particle_velocity_y': pvy,
+            'particle_velocity_z': pvz}
+
+    bbox = 1.1*np.array([[min(ppx), max(ppx)], [min(ppy), max(ppy)], [min(ppz), max(ppz)]])
+    ds = yt.load_particles(data, bbox=bbox)
+    ad = ds.all_data()
+
+    profile = yt.create_profile(
+        ad,
+        ["particle_position_x", "particle_position_y"],
+        "particle_velocity_x",
+        weight_field=None)
+    assert profile['particle_velocity_x'].min() < 0

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -1204,17 +1204,18 @@
     def __init__(self, ds, dataset_type='nyx_native'):
         super(NyxHierarchy, self).__init__(ds, dataset_type)
 
-        # extra beyond the base real fields that all Boxlib
-        # particles have, i.e. the xyz positions
-        nyx_extra_real_fields = ['particle_mass',
-                                 'particle_velocity_x',
-                                 'particle_velocity_y',
-                                 'particle_velocity_z']
+        if ("particles" in self.ds.parameters):
+            # extra beyond the base real fields that all Boxlib
+            # particles have, i.e. the xyz positions
+            nyx_extra_real_fields = ['particle_mass',
+                                     'particle_velocity_x',
+                                     'particle_velocity_y',
+                                     'particle_velocity_z']
 
-        is_checkpoint = False
+            is_checkpoint = False
 
-        self._read_particles("DM", is_checkpoint, 
-                             nyx_extra_real_fields[0:self.ds.dimensionality+1])
+            self._read_particles("DM", is_checkpoint, 
+                                 nyx_extra_real_fields[0:self.ds.dimensionality+1])
 
 
 class NyxDataset(BoxlibDataset):

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/frontends/boxlib/tests/test_outputs.py
--- a/yt/frontends/boxlib/tests/test_outputs.py
+++ b/yt/frontends/boxlib/tests/test_outputs.py
@@ -224,3 +224,28 @@
 @requires_file(rt)
 def test_units_override():
     units_override_check(rt)
+
+nyx_no_particles = "nyx_sedov_plt00086"
+@requires_file(nyx_no_particles)
+def test_nyx_no_part():
+    assert isinstance(data_dir_load(nyx_no_particles), NyxDataset)
+
+    fields = sorted(
+        [('boxlib', 'H'), ('boxlib', 'He'), ('boxlib', 'MachNumber'),
+         ('boxlib', 'Ne'), ('boxlib', 'Rank'), ('boxlib', 'StateErr'),
+         ('boxlib', 'Temp'), ('boxlib', 'X(H)'), ('boxlib', 'X(He)'),
+         ('boxlib', 'density'), ('boxlib', 'divu'), ('boxlib', 'eint_E'),
+         ('boxlib', 'eint_e'), ('boxlib', 'entropy'), ('boxlib', 'forcex'),
+         ('boxlib', 'forcey'), ('boxlib', 'forcez'), ('boxlib', 'kineng'),
+         ('boxlib', 'logden'), ('boxlib', 'magmom'), ('boxlib', 'magvel'),
+         ('boxlib', 'magvort'), ('boxlib', 'pressure'), ('boxlib', 'rho_E'),
+         ('boxlib', 'rho_H'), ('boxlib', 'rho_He'), ('boxlib', 'rho_e'),
+         ('boxlib', 'soundspeed'), ('boxlib', 'x_velocity'), ('boxlib', 'xmom'),
+         ('boxlib', 'y_velocity'), ('boxlib', 'ymom'), ('boxlib', 'z_velocity'),
+         ('boxlib', 'zmom')])
+
+    ds = data_dir_load(nyx_no_particles)
+    assert_equal(sorted(ds.field_list), fields)
+
+
+>>>>>>> merge rev:    f119d6d90c9f yt - ngoldbau: Merged in ngoldbaum/yt (pul...

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -13,14 +13,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import random
-from contextlib import contextmanager
-
 from yt.utilities.io_handler import \
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
-from yt.geometry.selection_routines import AlwaysSelector
-from yt.extern.six import u, b, iteritems
+from yt.extern.six import b, iteritems
 from yt.utilities.on_demand_imports import _h5py as h5py
 
 import numpy as np
@@ -77,33 +73,8 @@
         return (KeyError,)
 
     def _read_particle_coords(self, chunks, ptf):
-        chunks = list(chunks)
-        for chunk in chunks: # These should be organized by grid filename
-            f = None
-            for g in chunk.objs:
-                if g.filename is None: continue
-                if f is None:
-                    #print "Opening (count) %s" % g.filename
-                    f = h5py.File(g.filename, "r")
-                nap = sum(g.NumberOfActiveParticles.values())
-                if g.NumberOfParticles == 0 and nap == 0:
-                    continue
-                ds = f.get("/Grid%08i" % g.id)
-                for ptype, field_list in sorted(ptf.items()):
-                    if ptype != "io":
-                        if g.NumberOfActiveParticles[ptype] == 0: continue
-                        pds = ds.get("Particles/%s" % ptype)
-                    else:
-                        pds = ds
-                    pn = _particle_position_names.get(ptype,
-                            r"particle_position_%s")
-                    x, y, z = (np.asarray(pds.get(pn % ax).value, dtype="=f8")
-                               for ax in 'xyz')
-                    for field in field_list:
-                        if np.asarray(pds[field]).ndim > 1:
-                            self._array_fields[field] = pds[field].shape
-                    yield ptype, (x, y, z)
-            if f: f.close()
+        for rv in self._read_particle_fields(chunks, ptf, None):
+            yield rv
 
     def _read_particle_fields(self, chunks, ptf, selector):
         chunks = list(chunks)
@@ -128,6 +99,11 @@
                             r"particle_position_%s")
                     x, y, z = (np.asarray(pds.get(pn % ax).value, dtype="=f8")
                                for ax in 'xyz')
+                    if selector is None:
+                        # This only ever happens if the call is made from
+                        # _read_particle_coords.
+                        yield ptype, (x, y, z)
+                        continue
                     mask = selector.select_points(x, y, z, 0.0)
                     if mask is None: continue
                     for field in field_list:
@@ -137,168 +113,53 @@
                         yield (ptype, field), data[mask]
             if f: f.close()
 
-    def _read_fluid_selection(self, chunks, selector, fields, size):
-        rv = {}
-        # Now we have to do something unpleasant
-        chunks = list(chunks)
-        if selector.__class__.__name__ == "GridSelector":
-            if not (len(chunks) == len(chunks[0].objs) == 1):
-                raise RuntimeError
-            g = chunks[0].objs[0]
-            f = h5py.File(u(g.filename), 'r')
-            if g.id in self._cached_fields:
-                gf = self._cached_fields[g.id]
-                rv.update(gf)
-            if len(rv) == len(fields): return rv
-            gds = f.get("/Grid%08i" % g.id)
-            for field in fields:
-                if field in rv:
-                    self._hits += 1
-                    continue
-                self._misses += 1
-                ftype, fname = field
-                if fname in gds:
-                    rv[(ftype, fname)] = gds.get(fname).value.swapaxes(0, -1)
-                else:
-                    rv[(ftype, fname)] = np.zeros(g.ActiveDimensions)
-            if self._cache_on:
-                for gid in rv:
-                    self._cached_fields.setdefault(gid, {})
-                    self._cached_fields[gid].update(rv[gid])
-            f.close()
-            return rv
-        if size is None:
-            size = sum((g.count(selector) for chunk in chunks
-                        for g in chunk.objs))
-        for field in fields:
-            ftype, fname = field
-            fsize = size
-            rv[field] = np.empty(fsize, dtype="float64")
-        ng = sum(len(c.objs) for c in chunks)
-        mylog.debug("Reading %s cells of %s fields in %s grids",
-                   size, [f2 for f1, f2 in fields], ng)
-        ind = 0
-        h5_type = self._field_dtype
+    def io_iter(self, chunks, fields):
+        h5_dtype = self._field_dtype
         for chunk in chunks:
             fid = None
-            for g in chunk.objs:
-                if g.filename is None: continue
-                if fid is None:
-                    fid = h5py.h5f.open(b(g.filename), h5py.h5f.ACC_RDONLY)
-                gf = self._cached_fields.get(g.id, {})
-                data = np.empty(g.ActiveDimensions[::-1], dtype=h5_type)
-                data_view = data.swapaxes(0, -1)
-                nd = 0
+            filename = -1
+            for obj in chunk.objs:
+                if obj.filename is None: continue
+                if obj.filename != filename:
+                    # Note one really important thing here: even if we do
+                    # implement LRU caching in the _read_obj_field function,
+                    # we'll still be doing file opening and whatnot.  This is a
+                    # problem, but one we can return to.
+                    if fid is not None:
+                        fid.close()
+                    fid = h5py.h5f.open(b(obj.filename), h5py.h5f.ACC_RDONLY)
+                    filename = obj.filename
+                data = np.empty(obj.ActiveDimensions[::-1], dtype=h5_dtype)
                 for field in fields:
-                    if field in gf:
-                        nd = g.select(selector, gf[field], rv[field], ind)
-                        self._hits += 1
-                        continue
-                    self._misses += 1
-                    ftype, fname = field
-                    try:
-                        node = "/Grid%08i/%s" % (g.id, fname)
-                        dg = h5py.h5d.open(fid, b(node))
-                    except KeyError:
-                        if fname == "Dark_Matter_Density": continue
-                        raise
-                    dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
-                    if self._cache_on:
-                        self._cached_fields.setdefault(g.id, {})
-                        # Copy because it's a view into an empty temp array
-                        self._cached_fields[g.id][field] = data_view.copy()
-                    nd = g.select(selector, data_view, rv[field], ind) # caches
-                ind += nd
-            if fid: fid.close()
-        return rv
-
-    @contextmanager
-    def preload(self, chunk, fields, max_size):
-        if len(fields) == 0:
-            yield self
-            return
-        old_cache_on = self._cache_on
-        old_cached_fields = self._cached_fields
-        self._cached_fields = cf = {}
-        self._cache_on = True
-        for gid in old_cached_fields:
-            # Will not copy numpy arrays, which is good!
-            cf[gid] = old_cached_fields[gid].copy() 
-        self._hits = self._misses = 0
-        self._cached_fields = self._read_chunk_data(chunk, fields)
-        mylog.debug("(1st) Hits = % 10i Misses = % 10i",
-            self._hits, self._misses)
-        self._hits = self._misses = 0
-        yield self
-        mylog.debug("(2nd) Hits = % 10i Misses = % 10i",
-            self._hits, self._misses)
-        self._cached_fields = old_cached_fields
-        self._cache_on = old_cache_on
-        # Randomly remove some grids from the cache.  Note that we're doing
-        # this on a grid basis, not a field basis.  Performance will be
-        # slightly non-deterministic as a result of this, but it should roughly
-        # be statistically alright, assuming (as we do) that this will get
-        # called during largely unbalanced stuff.
-        if len(self._cached_fields) > max_size:
-            to_remove = random.sample(self._cached_fields.keys(),
-                len(self._cached_fields) - max_size)
-            mylog.debug("Purging from cache %s", len(to_remove))
-            for k in to_remove:
-                self._cached_fields.pop(k)
+                    yield field, obj, self._read_obj_field(obj, field, (fid, data))
+        if fid is not None:
+            fid.close()
+        
+    def _read_obj_field(self, obj, field, fid_data = None):
+        if fid_data is None: fid_data = (None, None)
+        fid, data = fid_data
+        if fid is None:
+            close = True
+            fid = h5py.h5f.open(b(obj.filename), h5py.h5f.ACC_RDONLY)
         else:
-            mylog.warning("Cache size % 10i (max % 10i)",
-                len(self._cached_fields), max_size)
-
-    def _read_chunk_data(self, chunk, fields):
-        fid = fn = None
-        rv = {}
-        mylog.debug("Preloading fields %s", fields)
-        # Split into particles and non-particles
-        fluid_fields, particle_fields = [], []
-        for ftype, fname in fields:
-            if ftype in self.ds.particle_types:
-                particle_fields.append((ftype, fname))
-            else:
-                fluid_fields.append((ftype, fname))
-        if len(particle_fields) > 0:
-            selector = AlwaysSelector(self.ds)
-            rv.update(self._read_particle_selection(
-              [chunk], selector, particle_fields))
-        if len(fluid_fields) == 0: return rv
-        h5_type = self._field_dtype
-        for g in chunk.objs:
-            rv[g.id] = gf = {}
-            if g.id in self._cached_fields:
-                rv[g.id].update(self._cached_fields[g.id])
-            if g.filename is None: continue
-            elif g.filename != fn:
-                if fid is not None: fid.close()
-                fid = None
-            if fid is None:
-                fid = h5py.h5f.open(b(g.filename), h5py.h5f.ACC_RDONLY)
-                fn = g.filename
-            data = np.empty(g.ActiveDimensions[::-1], dtype=h5_type)
-            data_view = data.swapaxes(0, -1)
-            for field in fluid_fields:
-                if field in gf:
-                    self._hits += 1
-                    continue
-                self._misses += 1
-                ftype, fname = field
-                try:
-                    node = "/Grid%08i/%s" % (g.id, fname)
-                    dg = h5py.h5d.open(fid, b(node))
-                except KeyError:
-                    if fname == "Dark_Matter_Density": continue
-                    raise
-                dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
-                gf[field] = data_view.copy()
-        if fid: fid.close()
-        if self._cache_on:
-            for gid in rv:
-                self._cached_fields.setdefault(gid, {})
-                self._cached_fields[gid].update(rv[gid])
-        return rv
+            close = False
+        if data is None:
+            data = np.empty(obj.ActiveDimensions[::-1],
+                            dtype=self._field_dtype)
+        ftype, fname = field
+        try:
+            node = "/Grid%08i/%s" % (obj.id, fname)
+            dg = h5py.h5d.open(fid, b(node))
+        except KeyError:
+            if fname == "Dark_Matter_Density": return None
+            raise
+        dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
+        # I don't know why, but on some installations of h5py this works, but
+        # on others, nope.  Doesn't seem to be a version thing.
+        #dg.close()
+        if close:
+            fid.close()
+        return data.T
 
 class IOHandlerPackedHDF5GhostZones(IOHandlerPackedHDF5):
     _dataset_type = "enzo_packed_3d_gz"
@@ -310,11 +171,9 @@
                       slice(NGZ, -NGZ),
                       slice(NGZ, -NGZ))
 
-    def _read_raw_data_set(self, grid, field):
-        f = h5py.File(grid.filename, "r")
-        ds = f["/Grid%08i/%s" % (grid.id, field)][:].swapaxes(0,2)
-        f.close()
-        return ds
+    def _read_obj_field(self, *args, **kwargs):
+        return super(IOHandlerPackedHDF5GhostZones, self)._read_obj_field(
+                *args, **kwargs)[self._base]
 
 class IOHandlerInMemory(BaseIOHandler):
 

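Reduced to a toy, the IO protocol this refactor (and the FLASH change below) moves to: io_iter yields (field, obj, data) triples and defers the actual read to _read_obj_field, so the selection bookkeeping can live in one shared place. The names here are illustrative, not yt's real classes:

    import numpy as np

    class ToyIOHandler:
        def io_iter(self, chunks, fields):
            for chunk in chunks:
                for obj in chunk:
                    for field in fields:
                        yield field, obj, self._read_obj_field(obj, field)

        def _read_obj_field(self, obj, field, ctx=None):
            # stand-in for the per-grid HDF5 read
            return np.zeros((4, 4, 4))

    io = ToyIOHandler()
    for field, obj, data in io.io_iter([["grid0", "grid1"]], ["density"]):
        assert data.shape == (4, 4, 4)
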
diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -18,7 +18,6 @@
 
 from yt.utilities.io_handler import \
     BaseIOHandler
-from yt.utilities.logger import ytLogger as mylog
 from yt.geometry.selection_routines import AlwaysSelector
 from yt.utilities.lib.geometry_utils import \
     compute_morton
@@ -61,6 +60,22 @@
             count_list, conv_factors):
         pass
 
+    def io_iter(self, chunks, fields):
+        f = self._handle
+        for chunk in chunks:
+            for field in fields:
+                # Note that we *prefer* to iterate over the fields on the
+                # outside; here, though, we're iterating over them on the
+                # inside because we may exhaust our chunks.
+                ftype, fname = field
+                ds = f["/%s" % fname]
+                for gs in grid_sequences(chunk.objs):
+                    start = gs[0].id - gs[0]._id_offset
+                    end = gs[-1].id - gs[-1]._id_offset + 1
+                    data = ds[start:end,:,:,:]
+                    for i, g in enumerate(gs):
+                        yield field, g, self._read_obj_field(g, field, (data, i))
+
     def _read_particle_coords(self, chunks, ptf):
         chunks = list(chunks)
         f_part = self._particle_handle
@@ -104,31 +119,18 @@
                     data = p_fields[start:end, fi]
                     yield (ptype, field), data[mask]
 
-    def _read_fluid_selection(self, chunks, selector, fields, size):
-        chunks = list(chunks)
-        if any((ftype != "flash" for ftype, fname in fields)):
-            raise NotImplementedError
-        f = self._handle
-        rv = {}
-        for field in fields:
-            ftype, fname = field
-            # Always use *native* 64-bit float.
-            rv[field] = np.empty(size, dtype="=f8")
-        ng = sum(len(c.objs) for c in chunks)
-        mylog.debug("Reading %s cells of %s fields in %s blocks",
-                    size, [f2 for f1, f2 in fields], ng)
-        for field in fields:
-            ftype, fname = field
-            ds = f["/%s" % fname]
-            ind = 0
-            for chunk in chunks:
-                for gs in grid_sequences(chunk.objs):
-                    start = gs[0].id - gs[0]._id_offset
-                    end = gs[-1].id - gs[-1]._id_offset + 1
-                    data = ds[start:end,:,:,:].transpose()
-                    for i, g in enumerate(gs):
-                        ind += g.select(selector, data[...,i], rv[field], ind)
-        return rv
+    def _read_obj_field(self, obj, field, ds_offset = None):
+        if ds_offset is None: ds_offset = (None, -1)
+        ds, offset = ds_offset
+        # our context here includes datasets and whatnot that are opened in the
+        # hdf5 file
+        if ds is None:
+            ds = self._handle["/%s" % field[1]]
+        if offset == -1:
+            data = ds[obj.id - obj._id_offset, :,:,:].transpose()
+        else:
+            data = ds[offset, :,:,:].transpose()
+        return data
 
     def _read_chunk_data(self, chunk, fields):
         f = self._handle

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -96,7 +96,10 @@
                  field_spec = "default",
                  ptype_spec = "default",
                  units_override=None,
-                 unit_system="cgs"):
+                 unit_system="cgs",
+                 use_dark_factor = False,
+                 w_0 = -1.0,
+                 w_a = 0.0):
         if self._instantiated: return
         self._header_spec = self._setup_binary_spec(
             header_spec, gadget_header_specs)
@@ -122,6 +125,12 @@
         if units_override is not None:
             raise RuntimeError("units_override is not supported for GadgetDataset. "+
                                "Use unit_base instead.")
+
+        # Set dark energy parameters before cosmology object is created
+        self.use_dark_factor = use_dark_factor
+        self.w_0 = w_0
+        self.w_a = w_a
+
         super(GadgetDataset, self).__init__(
             filename, dataset_type=dataset_type, unit_system=unit_system,
             n_ref=n_ref, over_refine_factor=over_refine_factor,

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/frontends/halo_catalog/data_structures.py
--- a/yt/frontends/halo_catalog/data_structures.py
+++ b/yt/frontends/halo_catalog/data_structures.py
@@ -30,6 +30,20 @@
 from yt.data_objects.static_output import \
     ParticleFile
 
+class HaloCatalogParticleIndex(ParticleIndex):
+    def _setup_filenames(self):
+        template = self.dataset.filename_template
+        ndoms = self.dataset.file_count
+        cls = self.dataset._file_class
+        if ndoms > 1:
+            self.data_files = \
+              [cls(self.dataset, self.io, template % {'num':i}, i)
+               for i in range(ndoms)]
+        else:
+            self.data_files = \
+              [cls(self.dataset, self.io,
+                   self.dataset.parameter_filename, 0)]
+
 class HaloCatalogHDF5File(ParticleFile):
     def __init__(self, ds, io, filename, file_id):
         with h5py.File(filename, "r") as f:

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -781,13 +781,7 @@
         name = keyword
     name = os.path.expanduser(name)
     if name[-1] == os.sep and not os.path.isdir(name):
-        try:
-            os.mkdir(name)
-        except OSError as e:
-            if e.errno == errno.EEXIST:
-                pass
-            else:
-                raise
+        ensure_dir(name)
     if os.path.isdir(name):
         name = os.path.join(name, keyword)
     if not name.endswith(suffix):
@@ -797,15 +791,20 @@
 def ensure_dir_exists(path):
     r"""Create all directories in path recursively in a parallel safe manner"""
     my_dir = os.path.dirname(path)
-    if not my_dir:
-        return
-    if not os.path.exists(my_dir):
-        only_on_root(os.makedirs, my_dir)
+    ensure_dir(my_dir)
 
 def ensure_dir(path):
     r"""Parallel safe directory maker."""
-    if not os.path.exists(path):
-        only_on_root(os.makedirs, path)
+    if os.path.exists(path):
+        return path
+
+    try:
+        os.makedirs(path)
+    except OSError as e:
+        if e.errno == errno.EEXIST:
+            pass
+        else:
+            raise
     return path
 
 def validate_width_tuple(width):
@@ -1011,7 +1010,7 @@
 def dummy_context_manager(*args, **kwargs):
     yield
 
-def matplotlib_style_context(style_name=None, after_reset=True):
+def matplotlib_style_context(style_name=None, after_reset=False):
     """Returns a context manager for controlling matplotlib style.
 
     Arguments are passed to matplotlib.style.context() if specified. Defaults
@@ -1021,11 +1020,13 @@
     available, returns a dummy context manager.
     """
     if style_name is None:
-        style_name = 'classic'
+        style_name = {
+            'mathtext.fontset': 'cm',
+            'mathtext.fallback_to_cm': True,
+        }
     try:
         import matplotlib.style
-        if style_name in matplotlib.style.available:
-            return matplotlib.style.context(style_name, after_reset=after_reset)
+        return matplotlib.style.context(style_name, after_reset=after_reset)
     except ImportError:
         pass
     return dummy_context_manager()

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -69,13 +69,18 @@
     def convert(self, unit):
         return self.dataset.conversion_factors[unit]
 
-    def _initialize_particle_handler(self):
-        self._setup_data_io()
+    def _setup_filenames(self):
         template = self.dataset.filename_template
         ndoms = self.dataset.file_count
         cls = self.dataset._file_class
-        self.data_files = [cls(self.dataset, self.io, template % {'num':i}, i)
-                           for i in range(ndoms)]
+        self.data_files = \
+          [cls(self.dataset, self.io, template % {'num':i}, i)
+           for i in range(ndoms)]
+
+    def _initialize_particle_handler(self):
+        self._setup_data_io()
+        self._setup_filenames()
+
         index_ptype = self.index_ptype
         if index_ptype == "all":
             self.total_particles = sum(

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -31,6 +31,7 @@
 ytcfg["yt","__command_line"] = "True"
 from yt.startup_tasks import parser, subparsers
 from yt.funcs import \
+    ensure_dir, \
     ensure_list, \
     get_hg_version, \
     get_yt_version, \
@@ -1358,7 +1359,7 @@
             data_dir = args.location
         if not os.path.exists(data_dir):
             print("The directory '%s' does not exist. Creating..." % data_dir)
-            os.mkdir(data_dir)
+            ensure_dir(data_dir)
         data_file = os.path.join(data_dir, args.filename)
         if os.path.exists(data_file) and not args.overwrite:
             raise IOError("File '%s' exists and overwrite=False!" % data_file)

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -58,6 +58,17 @@
     unit_system : :class:`yt.units.unit_systems.UnitSystem`, optional
         The units system to use when making calculations. If not specified,
         cgs units are assumed.
+    use_dark_factor : bool, optional
+        Flag to either use the cosmological constant (False, default) or the
+        parameterization of w(a) given in Linder 2002. This, along with w_0
+        and w_a, only matters in the function expansion_factor.
+    w_0 : float, optional
+        The Linder 2002 parameterization of w(a) is: w(a) = w_0 + w_a(1 - a).
+        w_0 is w(a = 1). Only matters if use_dark_factor = True. Default is
+        -1.0, the cosmological constant case.
+    w_a : float, optional
+        See w_0. w_a is the negative of the derivative of w(a) at a = 1.
+        Default is 0.0, the cosmological constant case.
 
     Examples
     --------
@@ -72,7 +83,10 @@
                  omega_lambda = 0.73,
                  omega_curvature = 0.0,
                  unit_registry = None,
-                 unit_system = "cgs"):
+                 unit_system = "cgs",
+                 use_dark_factor = False,
+                 w_0 = -1.0,
+                 w_a = 0.0):
         self.omega_matter = float(omega_matter)
         self.omega_lambda = float(omega_lambda)
         self.omega_curvature = float(omega_curvature)
@@ -87,6 +101,12 @@
         self.unit_registry = unit_registry
         self.hubble_constant = self.quan(hubble_constant, "100*km/s/Mpc")
         self.unit_system = unit_system
+        
+        # For non-standard dark energy. If false, use default cosmological constant
+        # This only affects the expansion_factor function.
+        self.use_dark_factor = use_dark_factor
+        self.w_0 = w_0
+        self.w_a = w_a
 
     def hubble_distance(self):
         r"""
@@ -382,9 +402,18 @@
         cosmological distances.
         
         """
+
+        # Use non-standard dark energy
+        if self.use_dark_factor:
+            dark_factor = self.get_dark_factor(z)
+
+        # Use default cosmological constant
+        else:
+            dark_factor = 1.0
+
         return np.sqrt(self.omega_matter * ((1 + z)**3.0) + 
                        self.omega_curvature * ((1 + z)**2.0) + 
-                       self.omega_lambda)
+                       (self.omega_lambda * dark_factor))
 
     def inverse_expansion_factor(self, z):
         return 1 / self.expansion_factor(z)
@@ -548,6 +577,31 @@
     
         return my_time.in_base(self.unit_system)
 
+    def get_dark_factor(self, z):
+        """
+        Compute the additional term that enters the expansion factor when
+        using non-standard dark energy. See Dolag et al 2004, eq. 7, for
+        reference (note the typo in that equation: there should be no
+        negative sign).
+
+        At the moment, this only supports the parameterization of Linder
+        2002, eq. 7: w(a) = w_0 + w_a(1 - a) = w_0 + w_a * z / (1 + z),
+        which admits an analytic expression. It is also currently only
+        functional for Gadget simulations.
+
+        Parameters
+        ----------
+        z : float
+            Redshift
+        """
+
+        # Get value of scale factor a corresponding to redshift z
+        scale_factor = 1.0 / (1.0 + z)
+
+        # Evaluate exponential using Linder02 parameterization
+        dark_factor = np.power(scale_factor, -3.0 * (1.0 + self.w_0 + self.w_a)) * \
+                      np.exp(-3.0 * self.w_a * (1.0 - scale_factor))
+
+        return dark_factor
+
     _arr = None
     @property
     def arr(self):

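With use_dark_factor enabled, expansion_factor evaluates
E(z) = sqrt(omega_matter*(1+z)**3 + omega_curvature*(1+z)**2 + omega_lambda*f(z)),
where f(z) = a**(-3*(1 + w_0 + w_a)) * exp(-3*w_a*(1 - a)) and a = 1/(1+z).
A quick consistency check, assuming this changeset is applied: (w_0, w_a) = (-1, 0) gives f(z) = 1 exactly and must reproduce the cosmological-constant case.

from yt.utilities.cosmology import Cosmology

co_lcdm = Cosmology(omega_matter=0.27, omega_lambda=0.73)
co_w0wa = Cosmology(omega_matter=0.27, omega_lambda=0.73,
                    use_dark_factor=True, w_0=-1.0, w_a=0.0)
for z in (0.0, 0.5, 1.0, 2.0):
    # The dark factor reduces to exactly 1 here, so the two agree.
    assert abs(co_lcdm.expansion_factor(z) -
               co_w0wa.expansion_factor(z)) < 1e-12
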
diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -20,16 +20,28 @@
 from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 from yt.extern.six import add_metaclass
+from yt.utilities.lru_cache import \
+    local_lru_cache, _make_key
 
 _axis_ids = {0:2,1:1,2:0}
 
 io_registry = {}
 
+use_caching = 0
+
+def _make_io_key(args, *_args, **kwargs):
+    self, obj, field, ctx = args
+    # Ignore self because we have a self-specific cache
+    return _make_key((obj.id, field), *_args, **kwargs)
+
 class RegisteredIOHandler(type):
     def __init__(cls, name, b, d):
         type.__init__(cls, name, b, d)
         if hasattr(cls, "_dataset_type"):
             io_registry[cls._dataset_type] = cls
+        if use_caching and hasattr(cls, "_read_obj_field"):
+            cls._read_obj_field = local_lru_cache(maxsize=use_caching, 
+                    typed=True, make_key=_make_io_key)(cls._read_obj_field)
 
 @add_metaclass(RegisteredIOHandler)
 class BaseIOHandler(object):
@@ -54,7 +66,6 @@
 
     # We need a function for reading a list of sets
     # and a function for *popping* from a queue all the appropriate sets
-
     @contextmanager
     def preload(self, chunk, fields, max_size):
         yield self
@@ -87,7 +98,7 @@
             return return_val
         else:
             return False
-            
+
     def _read_data_set(self, grid, field):
         # check backup file first. if field not found,
         # call frontend-specific io method
@@ -108,6 +119,23 @@
     def _read_data(self, grid, field):
         pass
 
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        # This function has an interesting history.  It previously was
+        # mandated to be defined by all of the subclasses.  But, to avoid
+        # having to rewrite a whole bunch of IO handlers all at once, and to
+        # allow a better abstraction for grid-based frontends, we're now
+        # defining it in the base class.
+        rv = {field: np.empty(size, dtype="=f8") for field in fields}
+        ind = {field: 0 for field in fields}
+        for field, obj, data in self.io_iter(chunks, fields):
+            if data is None: continue
+            if selector.__class__.__name__ == "GridSelector":
+                ind[field] += data.size
+                rv[field] = data.copy()
+                continue
+            ind[field] += obj.select(selector, data, rv[field], ind[field])
+        return rv
+
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]
         sl[axis] = slice(coord, coord + 1)

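Under the new base-class _read_fluid_selection, a grid frontend only has to provide io_iter, which yields (field, obj, data) triples; the base class handles buffer allocation and selection. A hypothetical minimal handler (ToyIOHandler and its stub read are illustrative, not part of yt):

import numpy as np

from yt.utilities.io_handler import BaseIOHandler

class ToyIOHandler(BaseIOHandler):
    _dataset_type = "toy"  # made-up dataset type

    def _read_obj_field(self, obj, field, ctx):
        # Stand-in for a real on-disk read of one field for one object.
        return np.zeros(8, dtype="=f8")

    def io_iter(self, chunks, fields):
        # Yield (field, obj, data); _read_fluid_selection accumulates
        # the selected values into the per-field output buffers.
        for chunk in chunks:
            for obj in chunk.objs:
                for field in fields:
                    yield field, obj, self._read_obj_field(obj, field, None)
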
diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/utilities/lib/image_utilities.pyx
--- a/yt/utilities/lib/image_utilities.pyx
+++ b/yt/utilities/lib/image_utilities.pyx
@@ -17,6 +17,7 @@
 
 def add_points_to_greyscale_image(
         np.ndarray[np.float64_t, ndim=2] buffer,
+        np.ndarray[np.int_t,     ndim=2] buffer_mask,
         np.ndarray[np.float64_t, ndim=1] px,
         np.ndarray[np.float64_t, ndim=1] py,
         np.ndarray[np.float64_t, ndim=1] pv):
@@ -28,6 +29,7 @@
         j = <int> (xs * px[pi])
         i = <int> (ys * py[pi])
         buffer[i, j] += pv[pi]
+        buffer_mask[i, j] = 1
     return
 
 def add_points_to_image(

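The new buffer_mask output records which pixels received any deposition, so callers can tell an untouched pixel from one that accumulated a value of zero. The loop in pure NumPy (a standalone sketch with made-up sizes, assuming xs and ys are the pixel counts along each axis):

import numpy as np

nx = ny = 4
buffer = np.zeros((nx, ny), dtype="float64")
buffer_mask = np.zeros((nx, ny), dtype=np.int_)
px = np.array([0.1, 0.6, 0.6])   # normalized x coordinates
py = np.array([0.2, 0.7, 0.7])   # normalized y coordinates
pv = np.array([1.0, 2.0, 3.0])   # values to deposit
for pi in range(px.size):
    j = int(nx * px[pi])
    i = int(ny * py[pi])
    buffer[i, j] += pv[pi]       # accumulate, as before
    buffer_mask[i, j] = 1        # new: flag the pixel as touched
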
diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/utilities/lru_cache.py
--- a/yt/utilities/lru_cache.py
+++ b/yt/utilities/lru_cache.py
@@ -53,7 +53,7 @@
         return key[0]
     return _HashedSeq(key)
 
-def lru_cache(maxsize=100, typed=False):
+def lru_cache(maxsize=100, typed=False, make_key=_make_key):
     """Least-recently-used cache decorator.
     If *maxsize* is set to None, the LRU features are disabled and the cache
     can grow without bound.
@@ -77,7 +77,6 @@
         cache = dict()
         stats = [0, 0]                  # make statistics updateable non-locally
         HITS, MISSES = 0, 1             # names for the stats fields
-        make_key = _make_key
         cache_get = cache.get           # bound method to lookup key or return None
         _len = len                      # localize the global len() function
         lock = RLock()                  # because linkedlist updates aren't threadsafe
@@ -182,6 +181,8 @@
     return decorating_function
 ### End of backported lru_cache
 
+local_lru_cache = lru_cache
+
 if sys.version_info[:2] >= (3, 3):
     # 3.2 has an lru_cache with an incompatible API
     from functools import lru_cache

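The new make_key hook lets callers replace the default cache-key builder; this is how io_handler.py keys its cache on (obj.id, field) while ignoring self and the chunk context. A toy illustration (Obj and ToyReader are hypothetical, not from the diff):

from yt.utilities.lru_cache import local_lru_cache, _make_key

def _toy_key(args, *_args, **kwargs):
    # args is the full positional tuple; drop self and ctx so repeated
    # reads of the same (obj.id, field) pair hit the cache.
    self, obj, field, ctx = args
    return _make_key((obj.id, field), *_args, **kwargs)

class Obj(object):
    def __init__(self, id):
        self.id = id

class ToyReader(object):
    @local_lru_cache(maxsize=8, typed=True, make_key=_toy_key)
    def _read_obj_field(self, obj, field, ctx):
        print("cache miss for %s/%s" % (obj.id, field))
        return obj.id

reader = ToyReader()
o = Obj(1)
reader._read_obj_field(o, "density", None)  # prints: cache miss
reader._read_obj_field(o, "density", None)  # cache hit, no print
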
diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/utilities/parameter_file_storage.py
--- a/yt/utilities/parameter_file_storage.py
+++ b/yt/utilities/parameter_file_storage.py
@@ -18,7 +18,8 @@
 from itertools import islice
 
 from yt.config import ytcfg
-from yt.funcs import mylog
+from yt.funcs import \
+    mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_simple_proxy
 

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/utilities/spatial/README
--- a/yt/utilities/spatial/README
+++ /dev/null
@@ -1,35 +0,0 @@
-Stephen Skory
-s at skory.us
-October 2011
-
-This directory is a modified version of the same directory that is part of
-the scipy.spatial package. It has been modified by me in the following
-ways:
-
-- In ckdtree.pyx, distances and searches over the
-  tree both take periodic boundary
-  conditions into account.
-
-- In ckdtree.pyx, all input and output arrays now
-  use 64-bit types: long and double.
-
-- In ckdtree.pyx, I've added two functions specifically for parallel HOP,
-  chainHOP_get_dens and find_chunk_nearest_neighbors.
-
-- In kdtree.py, I've commented out 'import scipy.sparse',
-  which means that any kdtree functionality that uses sparse
-  will not work. This is to avoid needing to build the rest
-  of scipy, which is a challenge and not necessary for just
-  the kdtree.
-
-- I've removed all of the qhull source and functionality.
-
-- I've removed the 'tests' directory.
-
-- I've removed anything having to do with Bento, the
-  python package manager.
-
-Anything that has been removed can be found in the original scipy
-source distribution.
-
-

diff -r 2d8d1f3cced8594f778622e7aee40a4497475988 -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 yt/utilities/spatial/__init__.py
--- a/yt/utilities/spatial/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Spatial algorithms and data structures (:mod:`scipy.spatial`)
-
-Nearest-neighbor queries:
-
-.. autosummary::
-
-   KDTree      -- class for efficient nearest-neighbor queries
-   cKDTree     -- class for efficient nearest-neighbor queries (faster impl.)
-   distance    -- module containing many different distance measures
-
-Delaunay triangulation:
-
-.. autosummary::
-
-   Delaunay
-   tsearch
-
-"""
-from __future__ import absolute_import
-
-from .kdtree import *
-from .ckdtree import *
-#from qhull import *
-
-__all__ = list(filter(lambda s: not s.startswith('_'), dir()))
-__all__ += ['distance']
-
-from . import distance
-from numpy.testing import Tester
-test = Tester().test

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/97c2c70f2c18/
Changeset:   97c2c70f2c18
Branch:      yt
User:        ngoldbaum
Date:        2017-04-14 17:40:15+00:00
Summary:     fix bad merge
Affected #:  1 file

diff -r 90eaae3b17e8d6761054eb6b41026e2b9c5b0cd6 -r 97c2c70f2c18a7c5d04a156ab5dc1f2cf0b5480e yt/frontends/boxlib/tests/test_outputs.py
--- a/yt/frontends/boxlib/tests/test_outputs.py
+++ b/yt/frontends/boxlib/tests/test_outputs.py
@@ -246,6 +246,3 @@
 
     ds = data_dir_load(nyx_no_particles)
     assert_equal(sorted(ds.field_list), fields)
-
-
->>>>>>> merge rev:    f119d6d90c9f yt - ngoldbau: Merged in ngoldbaum/yt (pul...


https://bitbucket.org/yt_analysis/yt/commits/416bc87fd064/
Changeset:   416bc87fd064
Branch:      yt
User:        ngoldbaum
Date:        2017-04-17 21:18:55+00:00
Summary:     Merged in ngoldbaum/yt (pull request #2566)

Remove "yield assert" pattern from all tests

Approved-by: Alexander Lindsay <al007 at illinois.edu>
Approved-by: Kacper Kowalik <xarthisius.kk at gmail.com>
Approved-by: Matt Turk <matthewturk at gmail.com>
Affected #:  84 files

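For readers unfamiliar with the pattern being removed: nose collects generator tests and invokes each yielded (callable, args) tuple lazily, a style pytest has deprecated and that produces poor tracebacks on failure. The conversion in miniature (illustrative, not taken from the diff):

from numpy.testing import assert_equal

def test_addition_old():
    # Before: nose-style generator test; the assertion only runs
    # when the runner calls the yielded tuple.
    yield assert_equal, 1 + 1, 2

def test_addition_new():
    # After: a plain test function with an immediate assertion.
    assert_equal(1 + 1, 2)
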
diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -301,7 +301,7 @@
 def test_voigt_profiles():
     a = 1.7e-4
     x = np.linspace(5.0, -3.6, 60)
-    yield assert_allclose_units, voigt_old(a, x), voigt_scipy(a, x), 1e-8
+    assert_allclose_units(voigt_old(a, x), voigt_scipy(a, x), 1e-8)
 
 @requires_file(GIZMO_PLUS)
 @requires_answer_testing()

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -52,6 +52,7 @@
                  validators=[ValidateSpatial(0)],
                  take_log=False,
                  display_field=False,
+                 sampling_type='cell',
                  units='')
 
 class Clump(TreeContainer):

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -10,9 +10,13 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.analysis_modules.photon_simulator.api import \
-    XSpecThermalModel, XSpecAbsorbModel, \
-    ThermalPhotonModel, PhotonList
+import warnings
+
+with warnings.catch_warnings():
+    warnings.simplefilter("ignore")
+    from yt.analysis_modules.photon_simulator.api import \
+        XSpecThermalModel, XSpecAbsorbModel, \
+        ThermalPhotonModel, PhotonList
 from yt.config import ytcfg
 from yt.testing import requires_file, requires_module
 import numpy as np

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -172,10 +172,12 @@
         self.current_v = 0.0
 
         _vlos = create_vlos(normal, self.no_shifting)
-        self.ds.add_field(("gas","v_los"), function=_vlos, units="cm/s")
+        self.ds.add_field(("gas","v_los"), function=_vlos, units="cm/s",
+                          sampling_type='cell')
 
         _intensity = self._create_intensity()
-        self.ds.add_field(("gas","intensity"), function=_intensity, units=self.field_units)
+        self.ds.add_field(("gas","intensity"), function=_intensity,
+                          units=self.field_units, sampling_type='cell')
 
         if method == "integrate" and weight_field is None:
             self.proj_units = str(ds.quan(1.0, self.field_units+"*cm").units)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/analysis_modules/ppv_cube/tests/test_ppv.py
--- a/yt/analysis_modules/ppv_cube/tests/test_ppv.py
+++ b/yt/analysis_modules/ppv_cube/tests/test_ppv.py
@@ -46,7 +46,7 @@
     a = cube.data.mean(axis=(0,1)).v
     b = dv*np.exp(-((cube.vmid+v_shift)/v_th)**2)/(np.sqrt(np.pi)*v_th)
 
-    yield assert_allclose_units, a, b, 1.0e-2
+    assert_allclose_units(a, b, 1.0e-2)
 
     E_0 = 6.8*u.keV
 
@@ -58,4 +58,4 @@
 
     c = dE*np.exp(-((cube.vmid-E_shift)/delta_E)**2)/(np.sqrt(np.pi)*delta_E)
 
-    yield assert_allclose_units, a, c, 1.0e-2
+    assert_allclose_units(a, c, 1.0e-2)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -113,7 +113,8 @@
     deltaI = np.zeros((3,nx,ny))
     for i in range(3):
         deltaI[i,:,:] = full_szpack3d(ds, xinit[i])
-        yield assert_almost_equal, deltaI[i,:,:], np.array(szprj["%d_GHz" % int(freqs[i])]), 6
+        assert_almost_equal(
+            deltaI[i,:,:], np.array(szprj["%d_GHz" % int(freqs[i])]), 6)
 
 M7 = "DD0010/moving7_0010"
 @requires_module("SZpack")

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -1195,7 +1195,7 @@
                 sampling_type = "particle"
         if sampling_type is None:
             warnings.warn("Because 'sampling_type' not specified, yt will "
-                          "assume a cell 'sampling_type'")
+                          "assume a cell 'sampling_type'", stacklevel=2)
             sampling_type = "cell"
         self.field_info.add_field(name, sampling_type, function=function, **kwargs)
         self.field_info._show_field_errors.append(name)

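The stacklevel=2 addition makes the warning point at the code that called add_field rather than at the warnings.warn call inside yt. A standalone sketch of the effect:

import warnings

def add_field(name, sampling_type=None):
    if sampling_type is None:
        # stacklevel=2 attributes the warning to add_field's caller.
        warnings.warn("Because 'sampling_type' not specified, yt will "
                      "assume a cell 'sampling_type'", stacklevel=2)

add_field("density")  # the reported file/line point at this call site
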
diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_chunking.py
--- a/yt/data_objects/tests/test_chunking.py
+++ b/yt/data_objects/tests/test_chunking.py
@@ -34,7 +34,7 @@
                 coords['i'][t] = uconcatenate(coords['i'][t])
                 coords['f'][t].sort()
                 coords['i'][t].sort()
-            yield assert_equal, coords['f']['io'], coords['f']['all']
-            yield assert_equal, coords['f']['io'], coords['f']['spatial']
-            yield assert_equal, coords['i']['io'], coords['i']['all']
-            yield assert_equal, coords['i']['io'], coords['i']['spatial']
+            assert_equal(coords['f']['io'], coords['f']['all'])
+            assert_equal(coords['f']['io'], coords['f']['spatial'])
+            assert_equal(coords['i']['io'], coords['i']['all'])
+            assert_equal(coords['i']['io'], coords['i']['spatial'])

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_compose.py
--- a/yt/data_objects/tests/test_compose.py
+++ b/yt/data_objects/tests/test_compose.py
@@ -34,7 +34,7 @@
     empty = np.array([])
     for n in [1, 2, 4, 8]:
         ds = fake_random_ds(64, nprocs=n)
-        ds.add_field(("index", "ID"), function=_IDFIELD)
+        ds.add_field(("index", "ID"), sampling_type='cell', function=_IDFIELD)
 
         # position parameters for initial region
         center = [0.25]*3
@@ -56,24 +56,24 @@
         # subselect non-overlapping 0, 1, 2, 3D regions
         for data1 in sources:
             data2 = ds.sphere(center, radius, data_source=data1)
-            yield assert_array_equal, data2['index', 'ID'], empty
+            assert_array_equal(data2['index', 'ID'], empty)
 
             data2 = ds.region(center, left_edge, right_edge, data_source=data1)
-            yield assert_array_equal, data2['index', 'ID'], empty  
+            assert_array_equal(data2['index', 'ID'], empty)
 
             data2 = ds.disk(center, normal, radius, height, data_source=data1)
-            yield assert_array_equal, data2['index', 'ID'], empty
+            assert_array_equal(data2['index', 'ID'], empty)
 
             for d in range(3):
                 data2 = ds.slice(d, center[d], data_source=data1)
-                yield assert_array_equal, data2['index', 'ID'], empty
+                assert_array_equal(data2['index', 'ID'], empty)
 
             for d in range(3):
                 data2 = ds.ortho_ray(d, center[0:d] + center[d+1:], data_source=data1)
-                yield assert_array_equal, data2['index', 'ID'], empty
+                assert_array_equal(data2['index', 'ID'], empty)
 
             data2 = ds.point(center, data_source=data1)
-            yield assert_array_equal, data2['index', 'ID'], empty
+            assert_array_equal(data2['index', 'ID'], empty)
 
 def test_compose_overlap():
     r"""Test to make sure that composed data objects that do
@@ -81,7 +81,7 @@
     """
     for n in [1, 2, 4, 8]:
         ds = fake_random_ds(64, nprocs=n)
-        ds.add_field(("index", "ID"), function=_IDFIELD)
+        ds.add_field(("index", "ID"), sampling_type='cell', function=_IDFIELD)
 
         # position parameters for initial region
         center = [0.4, 0.5, 0.5]
@@ -109,21 +109,21 @@
             id2 = data2['index', 'ID']
             id3 = data3['index', 'ID']
             id3.sort()
-            yield assert_array_equal, uintersect1d(id1, id2), id3
+            assert_array_equal(uintersect1d(id1, id2), id3)
 
             data2 = ds.region(center, left_edge, right_edge)
             data3 = ds.region(center, left_edge, right_edge, data_source=data1)
             id2 = data2['index', 'ID']
             id3 = data3['index', 'ID']
             id3.sort()
-            yield assert_array_equal, uintersect1d(id1, id2), id3
+            assert_array_equal(uintersect1d(id1, id2), id3)
 
             data2 = ds.disk(center, normal, radius, height)
             data3 = ds.disk(center, normal, radius, height, data_source=data1)
             id2 = data2['index', 'ID']
             id3 = data3['index', 'ID']
             id3.sort()
-            yield assert_array_equal, uintersect1d(id1, id2), id3
+            assert_array_equal(uintersect1d(id1, id2), id3)
 
             for d in range(3):
                 data2 = ds.slice(d, center[d])
@@ -131,7 +131,7 @@
                 id2 = data2['index', 'ID']
                 id3 = data3['index', 'ID']
                 id3.sort()
-                yield assert_array_equal, uintersect1d(id1, id2), id3
+                assert_array_equal(uintersect1d(id1, id2), id3)
 
             for d in range(3):
                 data2 = ds.ortho_ray(d, center[0:d] + center[d+1:])
@@ -139,11 +139,11 @@
                 id2 = data2['index', 'ID']
                 id3 = data3['index', 'ID']
                 id3.sort()
-                yield assert_array_equal, uintersect1d(id1, id2), id3
+                assert_array_equal(uintersect1d(id1, id2), id3)
 
             data2 = ds.point(center)
             data3 = ds.point(center, data_source=data1)
             id2 = data2['index', 'ID']
             id3 = data3['index', 'ID']
             id3.sort()
-            yield assert_array_equal, uintersect1d(id1, id2), id3
+            assert_array_equal(uintersect1d(id1, id2), id3)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -22,42 +22,42 @@
             cg = ds.covering_grid(level, [0.0, 0.0, 0.0],
                     dn * ds.domain_dimensions)
             # Test coordinate generation
-            yield assert_equal, np.unique(cg["dx"]).size, 1
+            assert_equal(np.unique(cg["dx"]).size, 1)
             xmi = cg["x"].min()
             xma = cg["x"].max()
             dx = cg["dx"].flat[0:1]
             edges = ds.arr([[0,1],[0,1],[0,1]], 'code_length')
-            yield assert_equal, xmi, edges[0,0] + dx/2.0
-            yield assert_equal, xmi, cg["x"][0,0,0]
-            yield assert_equal, xmi, cg["x"][0,1,1]
-            yield assert_equal, xma, edges[0,1] - dx/2.0
-            yield assert_equal, xma, cg["x"][-1,0,0]
-            yield assert_equal, xma, cg["x"][-1,1,1]
-            yield assert_equal, np.unique(cg["dy"]).size, 1
+            assert_equal(xmi, edges[0,0] + dx/2.0)
+            assert_equal(xmi, cg["x"][0,0,0])
+            assert_equal(xmi, cg["x"][0,1,1])
+            assert_equal(xma, edges[0,1] - dx/2.0)
+            assert_equal(xma, cg["x"][-1,0,0])
+            assert_equal(xma, cg["x"][-1,1,1])
+            assert_equal(np.unique(cg["dy"]).size, 1)
             ymi = cg["y"].min()
             yma = cg["y"].max()
             dy = cg["dy"][0]
-            yield assert_equal, ymi, edges[1,0] + dy/2.0
-            yield assert_equal, ymi, cg["y"][0,0,0]
-            yield assert_equal, ymi, cg["y"][1,0,1]
-            yield assert_equal, yma, edges[1,1] - dy/2.0
-            yield assert_equal, yma, cg["y"][0,-1,0]
-            yield assert_equal, yma, cg["y"][1,-1,1]
-            yield assert_equal, np.unique(cg["dz"]).size, 1
+            assert_equal(ymi, edges[1,0] + dy/2.0)
+            assert_equal(ymi, cg["y"][0,0,0])
+            assert_equal(ymi, cg["y"][1,0,1])
+            assert_equal(yma, edges[1,1] - dy/2.0)
+            assert_equal(yma, cg["y"][0,-1,0])
+            assert_equal(yma, cg["y"][1,-1,1])
+            assert_equal(np.unique(cg["dz"]).size, 1)
             zmi = cg["z"].min()
             zma = cg["z"].max()
             dz = cg["dz"][0]
-            yield assert_equal, zmi, edges[2,0] + dz/2.0
-            yield assert_equal, zmi, cg["z"][0,0,0]
-            yield assert_equal, zmi, cg["z"][1,1,0]
-            yield assert_equal, zma, edges[2,1] - dz/2.0
-            yield assert_equal, zma, cg["z"][0,0,-1]
-            yield assert_equal, zma, cg["z"][1,1,-1]
+            assert_equal(zmi, edges[2,0] + dz/2.0)
+            assert_equal(zmi, cg["z"][0,0,0])
+            assert_equal(zmi, cg["z"][1,1,0])
+            assert_equal(zma, edges[2,1] - dz/2.0)
+            assert_equal(zma, cg["z"][0,0,-1])
+            assert_equal(zma, cg["z"][1,1,-1])
             # Now we test other attributes
-            yield assert_equal, cg["ones"].max(), 1.0
-            yield assert_equal, cg["ones"].min(), 1.0
-            yield assert_equal, cg["grid_level"], level
-            yield assert_equal, cg["cell_volume"].sum(), ds.domain_width.prod()
+            assert_equal(cg["ones"].max(), 1.0)
+            assert_equal(cg["ones"].min(), 1.0)
+            assert_equal(cg["grid_level"], level)
+            assert_equal(cg["cell_volume"].sum(), ds.domain_width.prod())
             for g in ds.index.grids:
                 di = g.get_global_startindex()
                 dd = g.ActiveDimensions
@@ -65,7 +65,7 @@
                     f = cg["density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
                                       dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
                                       dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
-                    yield assert_equal, f, g["density"]
+                    assert_equal(f, g["density"])
 
 def test_smoothed_covering_grid():
     # We decompose in different ways
@@ -75,9 +75,9 @@
             dn = ds.refine_by**level 
             cg = ds.smoothed_covering_grid(level, [0.0, 0.0, 0.0],
                     dn * ds.domain_dimensions)
-            yield assert_equal, cg["ones"].max(), 1.0
-            yield assert_equal, cg["ones"].min(), 1.0
-            yield assert_equal, cg["cell_volume"].sum(), ds.domain_width.prod()
+            assert_equal(cg["ones"].max(), 1.0)
+            assert_equal(cg["ones"].min(), 1.0)
+            assert_equal(cg["cell_volume"].sum(), ds.domain_width.prod())
             for g in ds.index.grids:
                 if level != g.Level: continue
                 di = g.get_global_startindex()
@@ -86,7 +86,7 @@
                     f = cg["density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
                                       dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
                                       dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
-                    yield assert_equal, f, g["density"]
+                    assert_equal(f, g["density"])
 
 
 def test_arbitrary_grid():
@@ -133,7 +133,7 @@
                     2**ref_level * ds.domain_dimensions)
             ag = ds.arbitrary_grid([0.0, 0.0, 0.0], [1.0, 1.0, 1.0],
                     2**ref_level * ds.domain_dimensions)
-            yield assert_almost_equal, cg["density"], ag["density"]
+            assert_almost_equal(cg["density"], ag["density"])
 
 output_00080 = "output_00080/info_00080.txt"
 @requires_file(output_00080)

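The coordinate assertions above encode that covering_grid returns cell-centered coordinates: the first center sits half a cell width inside the left edge and the last half a width inside the right edge. In miniature, with plain NumPy and a hypothetical 8-cell axis:

import numpy as np

n = 8
left, right = 0.0, 1.0
dx = (right - left) / n
x = left + dx * (np.arange(n) + 0.5)   # cell-center coordinates
assert x.min() == left + dx / 2.0
assert x.max() == right - dx / 2.0
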
diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_cutting_plane.py
--- a/yt/data_objects/tests/test_cutting_plane.py
+++ b/yt/data_objects/tests/test_cutting_plane.py
@@ -26,9 +26,9 @@
         center = [0.5, 0.5, 0.5]
         normal = [1, 1, 1]
         cut = ds.cutting(normal, center)
-        yield assert_equal, cut["ones"].sum(), cut["ones"].size
-        yield assert_equal, cut["ones"].min(), 1.0
-        yield assert_equal, cut["ones"].max(), 1.0
+        assert_equal(cut["ones"].sum(), cut["ones"].size)
+        assert_equal(cut["ones"].min(), 1.0)
+        assert_equal(cut["ones"].max(), 1.0)
         pw = cut.to_pw(fields='density')
         for p in pw.plots.values():
             tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
@@ -39,20 +39,14 @@
             frb = cut.to_frb(width, 64)
             for cut_field in ['ones', 'density']:
                 fi = ds._get_field_info("unknown", cut_field)
-                yield assert_equal, frb[cut_field].info['data_source'], \
-                    cut.__str__()
-                yield assert_equal, frb[cut_field].info['axis'], \
-                    4
-                yield assert_equal, frb[cut_field].info['field'], \
-                    cut_field
-                yield assert_equal, frb[cut_field].units, \
-                    Unit(fi.units)
-                yield assert_equal, frb[cut_field].info['xlim'], \
-                    frb.bounds[:2]
-                yield assert_equal, frb[cut_field].info['ylim'], \
-                    frb.bounds[2:]
-                yield assert_equal, frb[cut_field].info['length_to_cm'], \
-                    ds.length_unit.in_cgs()
-                yield assert_equal, frb[cut_field].info['center'], \
-                    cut.center
+                assert_equal(frb[cut_field].info['data_source'],
+                             cut.__str__())
+                assert_equal(frb[cut_field].info['axis'], 4)
+                assert_equal(frb[cut_field].info['field'], cut_field)
+                assert_equal(frb[cut_field].units, Unit(fi.units))
+                assert_equal(frb[cut_field].info['xlim'], frb.bounds[:2])
+                assert_equal(frb[cut_field].info['ylim'], frb.bounds[2:])
+                assert_equal(frb[cut_field].info['length_to_cm'],
+                             ds.length_unit.in_cgs())
+                assert_equal(frb[cut_field].info['center'], cut.center)
     teardown_func(fns)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_data_collection.py
--- a/yt/data_objects/tests/test_data_collection.py
+++ b/yt/data_objects/tests/test_data_collection.py
@@ -17,14 +17,14 @@
         crho = coll["density"].sum(dtype="float64").to_ndarray()
         grho = np.sum([g["density"].sum(dtype="float64") for g in ds.index.grids],
                       dtype="float64")
-        yield assert_rel_equal, np.array([crho]), np.array([grho]), 12
-        yield assert_equal, coll.size, ds.domain_dimensions.prod()
+        assert_rel_equal(np.array([crho]), np.array([grho]), 12)
+        assert_equal(coll.size, ds.domain_dimensions.prod())
         for gi in range(ds.index.num_grids):
             grids = ds.index.grids[:gi+1]
             coll = ds.data_collection(grids)
             crho = coll["density"].sum(dtype="float64")
             grho = np.sum([g["density"].sum(dtype="float64") for g in grids],
                           dtype="float64")
-            yield assert_rel_equal, np.array([crho]), np.array([grho]), 12
-            yield assert_equal, coll.size, \
-                    sum(g.ActiveDimensions.prod() for g in grids)
+            assert_rel_equal(np.array([crho]), np.array([grho]), 12)
+            assert_equal(coll.size,
+                         sum(g.ActiveDimensions.prod() for g in grids))

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_dataset_access.py
--- a/yt/data_objects/tests/test_dataset_access.py
+++ b/yt/data_objects/tests/test_dataset_access.py
@@ -37,34 +37,34 @@
     # First, no string units
     reg1 = ds.r[0.2:0.3,0.4:0.6,:]
     reg2 = ds.region([0.25, 0.5, 0.5], [0.2, 0.4, 0.0], [0.3, 0.6, 1.0])
-    yield assert_equal, reg1["density"], reg2["density"]
+    assert_equal(reg1["density"], reg2["density"])
 
     # Now, string units in some -- 1.0 == cm
     reg1 = ds.r[(0.1, 'cm'):(0.5, 'cm'), :, (0.25, 'cm'): (0.35, 'cm')]
     reg2 = ds.region([0.3, 0.5, 0.3], [0.1, 0.0, 0.25], [0.5, 1.0, 0.35])
-    yield assert_equal, reg1["density"], reg2["density"]
+    assert_equal(reg1["density"], reg2["density"])
 
     # Now, string units in some -- 1.0 == cm
     reg1 = ds.r[(0.1, 'cm'):(0.5, 'cm'), :, 0.25:0.35]
     reg2 = ds.region([0.3, 0.5, 0.3], [0.1, 0.0, 0.25], [0.5, 1.0, 0.35])
-    yield assert_equal, reg1["density"], reg2["density"]
+    assert_equal(reg1["density"], reg2["density"])
 
     # And, lots of : usage!
     reg1 = ds.r[:, :, :]
     reg2 = ds.all_data()
-    yield assert_equal, reg1["density"], reg2["density"]
+    assert_equal(reg1["density"], reg2["density"])
 
 def test_accessing_all_data():
     # This will test first that we can access all_data, and next that we can
     # access it multiple times and get the *same object*.
     ds = fake_amr_ds(fields=["density"])
     dd = ds.all_data()
-    yield assert_equal, ds.r["density"], dd["density"]
+    assert_equal(ds.r["density"], dd["density"])
     # Now let's assert that it's the same object
     rho = ds.r["density"]
     rho *= 2.0
-    yield assert_equal, dd["density"]*2.0, ds.r["density"]
-    yield assert_equal, dd["gas", "density"]*2.0, ds.r["gas", "density"]
+    assert_equal(dd["density"]*2.0, ds.r["density"])
+    assert_equal(dd["gas", "density"]*2.0, ds.r["gas", "density"])
 
 def test_particle_counts():
     ds = fake_random_ds(16, particles=100)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_derived_quantities.py
--- a/yt/data_objects/tests/test_derived_quantities.py
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -15,17 +15,17 @@
                 "velocity_x", "velocity_y", "velocity_z"))
         for sp in [ds.sphere("c", (0.25, 'unitary')), ds.r[0.5,:,:]]:
             mi, ma = sp.quantities["Extrema"]("density")
-            yield assert_equal, mi, np.nanmin(sp["density"])
-            yield assert_equal, ma, np.nanmax(sp["density"])
+            assert_equal(mi, np.nanmin(sp["density"]))
+            assert_equal(ma, np.nanmax(sp["density"]))
             dd = ds.all_data()
             mi, ma = dd.quantities["Extrema"]("density")
-            yield assert_equal, mi, np.nanmin(dd["density"])
-            yield assert_equal, ma, np.nanmax(dd["density"])
+            assert_equal(mi, np.nanmin(dd["density"]))
+            assert_equal(ma, np.nanmax(dd["density"]))
             sp = ds.sphere("max", (0.25, 'unitary'))
-            yield assert_equal, np.any(np.isnan(sp["radial_velocity"])), False
+            assert_equal(np.any(np.isnan(sp["radial_velocity"])), False)
             mi, ma = dd.quantities["Extrema"]("radial_velocity")
-            yield assert_equal, mi, np.nanmin(dd["radial_velocity"])
-            yield assert_equal, ma, np.nanmax(dd["radial_velocity"])
+            assert_equal(mi, np.nanmin(dd["radial_velocity"]))
+            assert_equal(ma, np.nanmax(dd["radial_velocity"]))
 
 def test_average():
     for nprocs in [1, 2, 4, 8]:
@@ -33,11 +33,11 @@
         for ad in [ds.all_data(), ds.r[0.5, :, :]]:
         
             my_mean = ad.quantities["WeightedAverageQuantity"]("density", "ones")
-            yield assert_rel_equal, my_mean, ad["density"].mean(), 12
+            assert_rel_equal(my_mean, ad["density"].mean(), 12)
 
             my_mean = ad.quantities["WeightedAverageQuantity"]("density", "cell_mass")
             a_mean = (ad["density"] * ad["cell_mass"]).sum() / ad["cell_mass"].sum()
-            yield assert_rel_equal, my_mean, a_mean, 12
+            assert_rel_equal(my_mean, a_mean, 12)
 
 def test_variance():
     for nprocs in [1, 2, 4, 8]:
@@ -45,15 +45,15 @@
         for ad in [ds.all_data(), ds.r[0.5, :, :]]:
         
             my_std, my_mean = ad.quantities["WeightedVariance"]("density", "ones")
-            yield assert_rel_equal, my_mean, ad["density"].mean(), 12
-            yield assert_rel_equal, my_std, ad["density"].std(), 12
+            assert_rel_equal(my_mean, ad["density"].mean(), 12)
+            assert_rel_equal(my_std, ad["density"].std(), 12)
 
             my_std, my_mean = ad.quantities["WeightedVariance"]("density", "cell_mass")        
             a_mean = (ad["density"] * ad["cell_mass"]).sum() / ad["cell_mass"].sum()
-            yield assert_rel_equal, my_mean, a_mean, 12
+            assert_rel_equal(my_mean, a_mean, 12)
             a_std = np.sqrt((ad["cell_mass"] * (ad["density"] - a_mean)**2).sum() / 
                             ad["cell_mass"].sum())
-            yield assert_rel_equal, my_std, a_std, 12
+            assert_rel_equal(my_std, a_std, 12)
 
 def test_max_location():
     for nprocs in [1, 2, 4, 8]:
@@ -62,13 +62,13 @@
 
             mv, x, y, z = ad.quantities.max_location(("gas", "density"))
 
-            yield assert_equal, mv, ad["density"].max()
+            assert_equal(mv, ad["density"].max())
 
             mi = np.argmax(ad["density"])
 
-            yield assert_equal, ad["x"][mi], x
-            yield assert_equal, ad["y"][mi], y
-            yield assert_equal, ad["z"][mi], z
+            assert_equal(ad["x"][mi], x)
+            assert_equal(ad["y"][mi], y)
+            assert_equal(ad["z"][mi], z)
 
 def test_min_location():
     for nprocs in [1, 2, 4, 8]:
@@ -77,13 +77,13 @@
 
             mv, x, y, z = ad.quantities.min_location(("gas", "density"))
 
-            yield assert_equal, mv, ad["density"].min()
+            assert_equal(mv, ad["density"].min())
 
             mi = np.argmin(ad["density"])
 
-            yield assert_equal, ad["x"][mi], x
-            yield assert_equal, ad["y"][mi], y
-            yield assert_equal, ad["z"][mi], z
+            assert_equal(ad["x"][mi], x)
+            assert_equal(ad["y"][mi], y)
+            assert_equal(ad["z"][mi], z)
 
 def test_sample_at_min_field_values():
     for nprocs in [1, 2, 4, 8]:
@@ -94,12 +94,12 @@
             mv, temp, vm = ad.quantities.sample_at_min_field_values(
                 "density", ["temperature", "velocity_x"])
 
-            yield assert_equal, mv, ad["density"].min()
+            assert_equal(mv, ad["density"].min())
 
             mi = np.argmin(ad["density"])
 
-            yield assert_equal, ad["temperature"][mi], temp
-            yield assert_equal, ad["velocity_x"][mi], vm
+            assert_equal(ad["temperature"][mi], temp)
+            assert_equal(ad["velocity_x"][mi], vm)
 
 def test_sample_at_max_field_values():
     for nprocs in [1, 2, 4, 8]:
@@ -110,13 +110,9 @@
             mv, temp, vm = ad.quantities.sample_at_max_field_values(
                 "density", ["temperature", "velocity_x"])
 
-            yield assert_equal, mv, ad["density"].max()
+            assert_equal(mv, ad["density"].max())
 
             mi = np.argmax(ad["density"])
 
-            yield assert_equal, ad["temperature"][mi], temp
-            yield assert_equal, ad["velocity_x"][mi], vm
-
-if __name__ == "__main__":
-    for i in test_extrema():
-        i[0](*i[1:])
+            assert_equal(ad["temperature"][mi], temp)
+            assert_equal(ad["velocity_x"][mi], vm)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_ellipsoid.py
--- a/yt/data_objects/tests/test_ellipsoid.py
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -40,7 +40,7 @@
                 e0 = e0s[:,i]
                 tilt = tilts[i]
                 ell = ds.ellipsoid(c, A, B, C, e0, tilt)
-                yield assert_array_less, ell["radius"], A
+                assert_array_less(ell["radius"], A)
                 p = np.array([ell[ax] for ax in 'xyz'])
                 dot_evec = [np.zeros_like(ell["radius"]) for i in range(3)]
                 vecs = [ell._e0, ell._e1, ell._e2]
@@ -55,4 +55,4 @@
                 dist = 0
                 for ax_i in range(3):
                     dist += dot_evec[ax_i]**2.0 / mags[ax_i]**2.0
-                yield assert_array_less, dist, 1.0
+                assert_array_less(dist, 1.0)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_extract_regions.py
--- a/yt/data_objects/tests/test_extract_regions.py
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -23,17 +23,15 @@
         t = ( (dd["temperature"] > 0.5 ) 
             & (dd["density"] < 0.75 )
             & (dd["velocity_x"] > 0.25 ) )
-        yield assert_equal, np.all(r["temperature"] > 0.5), True
-        yield assert_equal, np.all(r["density"] < 0.75), True
-        yield assert_equal, np.all(r["velocity_x"] > 0.25), True
-        yield assert_equal, np.sort(dd["density"][t]), np.sort(r["density"])
-        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
-        # We are disabling these, as cutting cut regions does not presently
-        # work
-        #r2 = r.cut_region( [ "obj['temperature'] < 0.75" ] )
-        #t2 = (r["temperature"] < 0.75)
-        #yield assert_equal, np.sort(r2["temperature"]), np.sort(r["temperature"][t2])
-        #yield assert_equal, np.all(r2["temperature"] < 0.75), True
+        assert_equal(np.all(r["temperature"] > 0.5), True)
+        assert_equal(np.all(r["density"] < 0.75), True)
+        assert_equal(np.all(r["velocity_x"] > 0.25), True)
+        assert_equal(np.sort(dd["density"][t]), np.sort(r["density"]))
+        assert_equal(np.sort(dd["x"][t]), np.sort(r["x"]))
+        r2 = r.cut_region(["obj['temperature'] < 0.75"])
+        t2 = (r["temperature"] < 0.75)
+        assert_equal(np.sort(r2["temperature"]), np.sort(r["temperature"][t2]))
+        assert_equal(np.all(r2["temperature"] < 0.75), True)
 
         # Now we can test some projections
         dd = ds.all_data()
@@ -42,9 +40,9 @@
             p1 = ds.proj("density", 0, data_source=dd, weight_field=weight)
             p2 = ds.proj("density", 0, data_source=cr, weight_field=weight)
             for f in p1.field_data:
-                yield assert_almost_equal, p1[f], p2[f]
+                assert_almost_equal(p1[f], p2[f])
         cr = dd.cut_region(["obj['density'] > 0.25"])
         p2 = ds.proj("density", 2, data_source=cr)
-        yield assert_equal, p2["density"].max() > 0.25, True
+        assert_equal(p2["density"].max() > 0.25, True)
         p2 = ds.proj("density", 2, data_source=cr, weight_field = "density")
-        yield assert_equal, p2["density"].max() > 0.25, True
+        assert_equal(p2["density"].max() > 0.25, True)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_image_array.py
--- a/yt/data_objects/tests/test_image_array.py
+++ b/yt/data_objects/tests/test_image_array.py
@@ -26,14 +26,14 @@
     im_arr = ImageArray(dummy_image(10.0, 4))
 
     new_im = im_arr.rescale(inline=False)
-    yield assert_equal, im_arr[:, :, :3].max(), 2 * 10.
-    yield assert_equal, im_arr[:, :, 3].max(), 3 * 10.
-    yield assert_equal, new_im[:, :, :3].sum(axis=2).max(), 1.0
-    yield assert_equal, new_im[:, :, 3].max(), 1.0
+    assert_equal(im_arr[:, :, :3].max(), 2 * 10.)
+    assert_equal(im_arr[:, :, 3].max(), 3 * 10.)
+    assert_equal(new_im[:, :, :3].sum(axis=2).max(), 1.0)
+    assert_equal(new_im[:, :, 3].max(), 1.0)
 
     im_arr.rescale()
-    yield assert_equal, im_arr[:, :, :3].sum(axis=2).max(), 1.0
-    yield assert_equal, im_arr[:, :, 3].max(), 1.0
+    assert_equal(im_arr[:, :, :3].sum(axis=2).max(), 1.0)
+    assert_equal(im_arr[:, :, 3].max(), 1.0)
 
 
 class TestImageArray(unittest.TestCase):

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_numpy_ops.py
--- a/yt/data_objects/tests/test_numpy_ops.py
+++ b/yt/data_objects/tests/test_numpy_ops.py
@@ -136,18 +136,18 @@
         ad = ds.all_data()
 
         q = ad.argmin("density", axis=["density"])
-        yield assert_equal, q, ad["density"].min()
+        assert_equal(q, ad["density"].min())
 
         q1, q2 = ad.argmin("density", axis=["density", "temperature"])
         mi = np.argmin(ad["density"])
-        yield assert_equal, q1, ad["density"].min()
-        yield assert_equal, q2, ad["temperature"][mi]
+        assert_equal(q1, ad["density"].min())
+        assert_equal(q2, ad["temperature"][mi])
 
         pos = ad.argmin("density")
         mi = np.argmin(ad["density"])
-        yield assert_equal, pos[0], ad["x"][mi]
-        yield assert_equal, pos[1], ad["y"][mi]
-        yield assert_equal, pos[2], ad["z"][mi]
+        assert_equal(pos[0], ad["x"][mi])
+        assert_equal(pos[1], ad["y"][mi])
+        assert_equal(pos[2], ad["z"][mi])
 
 def test_argmax():
     for nprocs in [-1, 1, 2, 16]:
@@ -160,15 +160,15 @@
         ad = ds.all_data()
 
         q = ad.argmax("density", axis=["density"])
-        yield assert_equal, q, ad["density"].max()
+        assert_equal(q, ad["density"].max())
 
         q1, q2 = ad.argmax("density", axis=["density", "temperature"])
         mi = np.argmax(ad["density"])
-        yield assert_equal, q1, ad["density"].max()
-        yield assert_equal, q2, ad["temperature"][mi]
+        assert_equal(q1, ad["density"].max())
+        assert_equal(q2, ad["temperature"][mi])
 
         pos = ad.argmax("density")
         mi = np.argmax(ad["density"])
-        yield assert_equal, pos[0], ad["x"][mi]
-        yield assert_equal, pos[1], ad["y"][mi]
-        yield assert_equal, pos[2], ad["z"][mi]
+        assert_equal(pos[0], ad["x"][mi])
+        assert_equal(pos[1], ad["y"][mi])
+        assert_equal(pos[2], ad["z"][mi])

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_ortho_rays.py
--- a/yt/data_objects/tests/test_ortho_rays.py
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -25,5 +25,5 @@
                    (np.abs(my_all[axes[my_axes[1]]] - ocoord[1]) <= 
                     0.5 * dx[my_axes[1]])
 
-        yield assert_equal, my_oray['density'].sum(), \
-                            my_all['density'][my_cells].sum()
+        assert_equal(my_oray['density'].sum(),
+                     my_all['density'][my_cells].sum())

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_pickle.py
--- a/yt/data_objects/tests/test_pickle.py
+++ b/yt/data_objects/tests/test_pickle.py
@@ -52,9 +52,9 @@
 
     assert_equal.description = \
         "%s: File was pickle-loaded succesfully" % __name__
-    yield assert_equal, test_load is not None, True
+    assert_equal(test_load is not None, True)
     assert_equal.description = \
         "%s: Length of pickle-loaded connected set object" % __name__
-    yield assert_equal, len(contours[1][0]), len(test_load)
+    assert_equal(len(contours[1][0]), len(test_load))
 
     os.remove(cpklfile.name)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -55,9 +55,8 @@
 
                 for p1d in [direct_profile, indirect_profile_s,
                             indirect_profile_t]:
-                    yield assert_equal, p1d["index", "ones"].sum(), nv
-                    yield assert_rel_equal, tt, \
-                        p1d["gas", "temperature"].sum(), 7
+                    assert_equal(p1d["index", "ones"].sum(), nv)
+                    assert_rel_equal(tt, p1d["gas", "temperature"].sum(), 7)
 
                 p2d = Profile2D(
                     dd,
@@ -65,8 +64,8 @@
                     "temperature", nb, tmi*e1, tma*e2, lf,
                     weight_field=None)
                 p2d.add_fields(["ones", "temperature"])
-                yield assert_equal, p2d["ones"].sum(), nv
-                yield assert_rel_equal, tt, p2d["temperature"].sum(), 7
+                assert_equal(p2d["ones"].sum(), nv)
+                assert_rel_equal(tt, p2d["temperature"].sum(), 7)
 
                 p3d = Profile3D(
                     dd,
@@ -75,39 +74,39 @@
                     "dinosaurs",   nb, dmi*e1, dma*e2, lf,
                     weight_field=None)
                 p3d.add_fields(["ones", "temperature"])
-                yield assert_equal, p3d["ones"].sum(), nv
-                yield assert_rel_equal, tt, p3d["temperature"].sum(), 7
+                assert_equal(p3d["ones"].sum(), nv)
+                assert_rel_equal(tt, p3d["temperature"].sum(), 7)
 
         p1d = Profile1D(dd, "x", nb, 0.0, 1.0, False,
                         weight_field = None)
         p1d.add_fields("ones")
         av = nv / nb
-        yield assert_equal, p1d["ones"], np.ones(nb)*av
+        assert_equal(p1d["ones"], np.ones(nb)*av)
 
         # We re-bin ones with a weight now
         p1d = Profile1D(dd, "x", nb, 0.0, 1.0, False,
                         weight_field = "temperature")
         p1d.add_fields(["ones"])
-        yield assert_equal, p1d["ones"], np.ones(nb)
+        assert_equal(p1d["ones"], np.ones(nb))
 
         # Verify we can access "ones" after adding a new field
         # See issue 988
         p1d.add_fields(["density"])
-        yield assert_equal, p1d["ones"], np.ones(nb)
+        assert_equal(p1d["ones"], np.ones(nb))
 
         p2d = Profile2D(dd, "x", nb, 0.0, 1.0, False,
                             "y", nb, 0.0, 1.0, False,
                             weight_field = None)
         p2d.add_fields("ones")
         av = nv / nb**2
-        yield assert_equal, p2d["ones"], np.ones((nb, nb))*av
+        assert_equal(p2d["ones"], np.ones((nb, nb))*av)
 
         # We re-bin ones with a weight now
         p2d = Profile2D(dd, "x", nb, 0.0, 1.0, False,
                             "y", nb, 0.0, 1.0, False,
                             weight_field = "temperature")
         p2d.add_fields(["ones"])
-        yield assert_equal, p2d["ones"], np.ones((nb, nb))
+        assert_equal(p2d["ones"], np.ones((nb, nb)))
 
         p3d = Profile3D(dd, "x", nb, 0.0, 1.0, False,
                             "y", nb, 0.0, 1.0, False,
@@ -115,7 +114,7 @@
                             weight_field = None)
         p3d.add_fields("ones")
         av = nv / nb**3
-        yield assert_equal, p3d["ones"], np.ones((nb, nb, nb))*av
+        assert_equal(p3d["ones"], np.ones((nb, nb, nb))*av)
 
         # We re-bin ones with a weight now
         p3d = Profile3D(dd, "x", nb, 0.0, 1.0, False,
@@ -123,7 +122,7 @@
                             "z", nb, 0.0, 1.0, False,
                             weight_field = "temperature")
         p3d.add_fields(["ones"])
-        yield assert_equal, p3d["ones"], np.ones((nb,nb,nb))
+        assert_equal(p3d["ones"], np.ones((nb,nb,nb)))
 
 extrema_s = {'particle_position_x': (0, 1)}
 logs_s = {'particle_position_x': False}
@@ -139,32 +138,32 @@
         p1d = Profile1D(dd, "particle_position_x", 128,
                         0.0, 1.0, False, weight_field = None)
         p1d.add_fields(["particle_ones"])
-        yield assert_equal, p1d["particle_ones"].sum(), 32**3
+        assert_equal(p1d["particle_ones"].sum(), 32**3)
 
         p1d = create_profile(dd, ["particle_position_x"], ["particle_ones"],
                              weight_field=None, n_bins=128, extrema=extrema_s,
                              logs=logs_s)
-        yield assert_equal, p1d["particle_ones"].sum(), 32**3
+        assert_equal(p1d["particle_ones"].sum(), 32**3)
 
         p1d = create_profile(dd,
                              [("all", "particle_position_x")],
                              [("all", "particle_ones")],
                              weight_field=None, n_bins=128, extrema=extrema_t,
                              logs=logs_t)
-        yield assert_equal, p1d["particle_ones"].sum(), 32**3
+        assert_equal(p1d["particle_ones"].sum(), 32**3)
 
         p2d = Profile2D(dd, "particle_position_x", 128, 0.0, 1.0, False,
                             "particle_position_y", 128, 0.0, 1.0, False,
                         weight_field = None)
         p2d.add_fields(["particle_ones"])
-        yield assert_equal, p2d["particle_ones"].sum(), 32**3
+        assert_equal(p2d["particle_ones"].sum(), 32**3)
 
         p3d = Profile3D(dd, "particle_position_x", 128, 0.0, 1.0, False,
                             "particle_position_y", 128, 0.0, 1.0, False,
                             "particle_position_z", 128, 0.0, 1.0, False,
                         weight_field = None)
         p3d.add_fields(["particle_ones"])
-        yield assert_equal, p3d["particle_ones"].sum(), 32**3
+        assert_equal(p3d["particle_ones"].sum(), 32**3)
 
 def test_mixed_particle_mesh_profiles():
     ds = fake_random_ds(32, particles=10)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -44,8 +44,8 @@
         # test if projections inherit the field parameters of their data sources
         dd.set_field_parameter("bulk_velocity", np.array([0,1,2]))
         proj = ds.proj("density", 0, data_source=dd)
-        yield assert_equal, dd.field_parameters["bulk_velocity"], \
-          proj.field_parameters["bulk_velocity"]
+        assert_equal(dd.field_parameters["bulk_velocity"],
+                     proj.field_parameters["bulk_velocity"])
 
         # Some simple projection tests with single grids
         for ax, an in enumerate("xyz"):
@@ -54,17 +54,18 @@
             for wf in ['density', ("gas", "density"), None]:
                 proj = ds.proj(["ones", "density"], ax, weight_field=wf)
                 if wf is None:
-                    yield assert_equal, proj["ones"].sum(), LENGTH_UNIT*proj["ones"].size
-                    yield assert_equal, proj["ones"].min(), LENGTH_UNIT
-                    yield assert_equal, proj["ones"].max(), LENGTH_UNIT
+                    assert_equal(proj["ones"].sum(),
+                                 LENGTH_UNIT*proj["ones"].size)
+                    assert_equal(proj["ones"].min(), LENGTH_UNIT)
+                    assert_equal(proj["ones"].max(), LENGTH_UNIT)
                 else:
-                    yield assert_equal, proj["ones"].sum(), proj["ones"].size
-                    yield assert_equal, proj["ones"].min(), 1.0
-                    yield assert_equal, proj["ones"].max(), 1.0
-                yield assert_equal, np.unique(proj["px"]), uc[xax]
-                yield assert_equal, np.unique(proj["py"]), uc[yax]
-                yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
-                yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+                    assert_equal(proj["ones"].sum(), proj["ones"].size)
+                    assert_equal(proj["ones"].min(), 1.0)
+                    assert_equal(proj["ones"].max(), 1.0)
+                assert_equal(np.unique(proj["px"]), uc[xax])
+                assert_equal(np.unique(proj["py"]), uc[yax])
+                assert_equal(np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0))
+                assert_equal(np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0))
                 plots = [proj.to_pw(fields='density'), proj.to_pw()]
                 for pw in plots:
                     for p in pw.plots.values():
@@ -75,16 +76,15 @@
                 frb = proj.to_frb((1.0, 'unitary'), 64)
                 for proj_field in ['ones', 'density', 'temperature']:
                     fi = ds._get_field_info(proj_field)
-                    yield assert_equal, frb[proj_field].info['data_source'], \
-                        proj.__str__()
-                    yield assert_equal, frb[proj_field].info['axis'], \
-                        ax
-                    yield assert_equal, frb[proj_field].info['field'], \
-                        proj_field
+                    assert_equal(frb[proj_field].info['data_source'],
+                                 proj.__str__())
+                    assert_equal(frb[proj_field].info['axis'], ax)
+                    assert_equal(frb[proj_field].info['field'], proj_field)
                     field_unit = Unit(fi.units)
                     if wf is not None:
-                        yield assert_equal, frb[proj_field].units, \
-                            Unit(field_unit, registry=ds.unit_registry)
+                        assert_equal(
+                            frb[proj_field].units,
+                            Unit(field_unit, registry=ds.unit_registry))
                     else:
                         if frb[proj_field].units.is_code_unit:
                             proj_unit = "code_length"
@@ -93,26 +93,23 @@
                         if field_unit != '' and field_unit != Unit():
                             proj_unit = \
                                 "({0}) * {1}".format(field_unit, proj_unit)
-                        yield assert_equal, frb[proj_field].units, \
-                            Unit(proj_unit, registry=ds.unit_registry)
-                    yield assert_equal, frb[proj_field].info['xlim'], \
-                        frb.bounds[:2]
-                    yield assert_equal, frb[proj_field].info['ylim'], \
-                        frb.bounds[2:]
-                    yield assert_equal, frb[proj_field].info['center'], \
-                        proj.center
+                        assert_equal(
+                            frb[proj_field].units,
+                            Unit(proj_unit, registry=ds.unit_registry))
+                    assert_equal(frb[proj_field].info['xlim'], frb.bounds[:2])
+                    assert_equal(frb[proj_field].info['ylim'], frb.bounds[2:])
+                    assert_equal(frb[proj_field].info['center'], proj.center)
                     if wf is None:
-                        yield assert_equal, \
-                            frb[proj_field].info['weight_field'], wf
+                        assert_equal(frb[proj_field].info['weight_field'], wf)
                     else:
-                        yield assert_equal, \
-                            frb[proj_field].info['weight_field'], \
-                            proj.data_source._determine_fields(wf)[0]
+                        assert_equal(
+                            frb[proj_field].info['weight_field'],
+                            proj.data_source._determine_fields(wf)[0])
             # wf == None
-            yield assert_equal, wf, None
+            assert_equal(wf, None)
             v1 = proj["density"].sum()
             v2 = (LENGTH_UNIT * dd["density"] * dd["d%s" % an]).sum()
-            yield assert_rel_equal, v1, v2, 10
+            assert_rel_equal(v1, v2, 10)
     teardown_func(fns)
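
The conversions above all follow one pattern: nose's yield-style parametrization (yield assert_func, arg1, arg2) becomes a direct call (assert_func(arg1, arg2)), which keeps the suite compatible with runners that do not collect yield tests (e.g. pytest). A minimal before/after sketch, using a hypothetical test built on yt.testing helpers rather than a function from this diff:

    from yt.testing import fake_random_ds, assert_equal

    # Before: nose collected each yielded (callable, *args) tuple as a
    # separate generated test case.
    def test_ones_yield_style():
        for nprocs in [1, 2, 4]:
            dd = fake_random_ds(16, nprocs=nprocs).all_data()
            yield assert_equal, dd["ones"].min(), 1.0

    # After: the assertions run inline; the first failure raises inside
    # this single test function instead of failing one generated case.
    def test_ones_direct():
        for nprocs in [1, 2, 4]:
            dd = fake_random_ds(16, nprocs=nprocs).all_data()
            assert_equal(dd["ones"].min(), 1.0)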
 
 

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_rays.py
--- a/yt/data_objects/tests/test_rays.py
+++ b/yt/data_objects/tests/test_rays.py
@@ -30,7 +30,7 @@
             p2 = ds.arr(pp2[:,i] + 1e-8 * np.random.random(3), 'code_length')
 
             my_ray = ds.ray(p1, p2)
-            yield assert_rel_equal, my_ray['dts'].sum(), unitary, 14
+            assert_rel_equal(my_ray['dts'].sum(), unitary, 14)
             ray_cells = my_ray['dts'] > 0
 
             # find cells intersected by the ray
@@ -47,10 +47,10 @@
             tout = tout.min(axis=0)
             my_cells = (tin < tout) & (tin < 1) & (tout > 0)
 
-            yield assert_equal, ray_cells.sum(), my_cells.sum()
-            yield assert_rel_equal, my_ray['density'][ray_cells].sum(), \
-                                    my_all['density'][my_cells].sum(), 14
-            yield assert_rel_equal, my_ray['dts'].sum(), unitary, 14
+            assert_equal(ray_cells.sum(), my_cells.sum())
+            assert_rel_equal(my_ray['density'][ray_cells].sum(),
+                             my_all['density'][my_cells].sum(), 14)
+            assert_rel_equal(my_ray['dts'].sum(), unitary, 14)
 
 @requires_file('GadgetDiskGalaxy/snapshot_200.hdf5')
 def test_ray_in_particle_octree():

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -54,13 +54,13 @@
             for wf in ["density", None]:
                 slc = ds.slice(ax, slc_pos)
                 shifted_slc = ds.slice(ax, slc_pos + grid_eps)
-                yield assert_equal, slc["ones"].sum(), slc["ones"].size
-                yield assert_equal, slc["ones"].min(), 1.0
-                yield assert_equal, slc["ones"].max(), 1.0
-                yield assert_equal, np.unique(slc["px"]), uc[xax]
-                yield assert_equal, np.unique(slc["py"]), uc[yax]
-                yield assert_equal, np.unique(slc["pdx"]), 0.5 / dims[xax]
-                yield assert_equal, np.unique(slc["pdy"]), 0.5 / dims[yax]
+                assert_equal(slc["ones"].sum(), slc["ones"].size)
+                assert_equal(slc["ones"].min(), 1.0)
+                assert_equal(slc["ones"].max(), 1.0)
+                assert_equal(np.unique(slc["px"]), uc[xax])
+                assert_equal(np.unique(slc["py"]), uc[yax])
+                assert_equal(np.unique(slc["pdx"]), 0.5 / dims[xax])
+                assert_equal(np.unique(slc["pdy"]), 0.5 / dims[yax])
                 pw = slc.to_pw(fields='density')
                 for p in pw.plots.values():
                     tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
@@ -72,25 +72,19 @@
                     shifted_frb = shifted_slc.to_frb((1.0, 'unitary'), 64)
                     for slc_field in ['ones', 'density']:
                         fi = ds._get_field_info(slc_field)
-                        yield assert_equal, frb[slc_field].info['data_source'], \
-                            slc.__str__()
-                        yield assert_equal, frb[slc_field].info['axis'], \
-                            ax
-                        yield assert_equal, frb[slc_field].info['field'], \
-                            slc_field
-                        yield assert_equal, frb[slc_field].units, \
-                            Unit(fi.units)
-                        yield assert_equal, frb[slc_field].info['xlim'], \
-                            frb.bounds[:2]
-                        yield assert_equal, frb[slc_field].info['ylim'], \
-                            frb.bounds[2:]
-                        yield assert_equal, frb[slc_field].info['center'], \
-                            slc.center
-                        yield assert_equal, frb[slc_field].info['coord'], \
-                            slc_pos
-                        yield assert_equal, frb[slc_field], \
-                            shifted_frb[slc_field]
-            yield assert_equal, wf, None
+                        assert_equal(frb[slc_field].info['data_source'],
+                                     slc.__str__())
+                        assert_equal(frb[slc_field].info['axis'], ax)
+                        assert_equal(frb[slc_field].info['field'], slc_field)
+                        assert_equal(frb[slc_field].units, Unit(fi.units))
+                        assert_equal(frb[slc_field].info['xlim'],
+                                     frb.bounds[:2])
+                        assert_equal(frb[slc_field].info['ylim'],
+                                     frb.bounds[2:])
+                        assert_equal(frb[slc_field].info['center'], slc.center)
+                        assert_equal(frb[slc_field].info['coord'], slc_pos)
+                        assert_equal(frb[slc_field], shifted_frb[slc_field])
+            assert_equal(wf, None)
     teardown_func(fns)
 
 
@@ -106,4 +100,4 @@
     ds = fake_random_ds(64, nprocs=8, fields=["density"], negative=[False])
     slc = ds.slice(2, 1.0)
     slc["density"]
-    yield assert_equal, slc["density"].size, 0
+    assert_equal(slc["density"].size, 0)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_spheres.py
--- a/yt/data_objects/tests/test_spheres.py
+++ b/yt/data_objects/tests/test_spheres.py
@@ -32,8 +32,8 @@
     # Set the bulk velocity field parameter
     sp1.set_field_parameter("bulk_velocity", bulk_vel)
 
-    yield assert_equal, np.any(sp0["radial_velocity"] ==
-                               sp1["radial_velocity"]), False
+    assert_equal(np.any(sp0["radial_velocity"] == sp1["radial_velocity"]),
+                 False)
 
     # Radial profile without correction
     # Note we set n_bins = 8 here.
@@ -50,12 +50,12 @@
                          logs = {'radius': False},
                          n_bins = 8)
 
-    yield assert_equal, rp0.x_bins, rp1.x_bins
-    yield assert_equal, rp0.used, rp1.used
-    yield assert_equal, rp0.used.sum() > rp0.used.size/2.0, True
-    yield assert_equal, np.any(rp0["radial_velocity"][rp0.used] ==
-                               rp1["radial_velocity"][rp1.used]), \
-                               False
+    assert_equal(rp0.x_bins, rp1.x_bins)
+    assert_equal(rp0.used, rp1.used)
+    assert_equal(rp0.used.sum() > rp0.used.size/2.0, True)
+    assert_equal(np.any(rp0["radial_velocity"][rp0.used] ==
+                        rp1["radial_velocity"][rp1.used]),
+                 False)
 
     ref_sp = ds.sphere("c", 0.25)
     for f in _fields_to_compare:
@@ -64,4 +64,4 @@
         sp = ds.sphere(center, 0.25)
         for f in _fields_to_compare:
             sp[f].sort()
-            yield assert_equal, sp[f], ref_sp[f]
+            assert_equal(sp[f], ref_sp[f])

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/data_objects/tests/test_streamlines.py
--- a/yt/data_objects/tests/test_streamlines.py
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -23,6 +23,6 @@
         streams = Streamlines(ds, cs, length=length)
         streams.integrate_through_volume()
         for path in (streams.path(i) for i in range(8)):
-            yield assert_rel_equal, path['dts'].sum(), 1.0, 14
-            yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True
+            assert_rel_equal(path['dts'].sum(), 1.0, 14)
+            assert_equal(np.all(path['t'] <= (1.0 + 1e-10)), True)
             path["density"]

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -257,7 +257,7 @@
             self[name] = DerivedField(name, sampling_type, function, **kwargs)
             return
 
-        if kwargs.get("particle_type", False):
+        if sampling_type == 'particle':
             ftype = 'all'
         else:
             ftype = self.ds.default_fluid_type
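
This hunk switches the particle-field detection in add_field from the legacy particle_type keyword to the explicit sampling_type argument that DerivedField now takes. The two spellings, mirroring the test updates further down in this diff:

    # Old spelling, matched by the removed kwargs.get("particle_type", False):
    ds.add_field('particle_mass_alias', function=pmass_alias,
                 units='g', particle_type=True)

    # New spelling, matched by sampling_type == 'particle'; mesh fields
    # pass sampling_type='cell' instead.
    ds.add_field('particle_mass_alias', function=pmass_alias,
                 units='g', sampling_type='particle')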

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -238,15 +238,23 @@
     def unitless_data(field, data):
             return np.ones(data['density'].shape)
 
-    ds.add_field(('gas','density_alias_no_units'), function=density_alias)
-    ds.add_field(('gas','density_alias_auto'), function=density_alias,
-                 units='auto', dimensions='density')
-    ds.add_field(('gas','density_alias_wrong_units'), function=density_alias,
+    ds.add_field(('gas','density_alias_no_units'), sampling_type='cell',
+                 function=density_alias)
+    ds.add_field(('gas','density_alias_auto'), sampling_type='cell',
+                 function=density_alias, units='auto', dimensions='density')
+    ds.add_field(('gas','density_alias_wrong_units'),
+                 function=density_alias,
+                 sampling_type='cell',
                  units='m/s')
-    ds.add_field(('gas','density_alias_unparseable_units'), function=density_alias,
+    ds.add_field(('gas','density_alias_unparseable_units'),
+                 sampling_type='cell',
+                 function=density_alias,
                  units='dragons')
-    ds.add_field(('gas','density_alias_auto_wrong_dims'), function=density_alias,
-                 units='auto', dimensions="temperature")
+    ds.add_field(('gas','density_alias_auto_wrong_dims'),
+                 function=density_alias,
+                 sampling_type='cell',
+                 units='auto',
+                 dimensions="temperature")
     assert_raises(YTFieldUnitError, get_data, ds, 'density_alias_no_units')
     assert_raises(YTFieldUnitError, get_data, ds, 'density_alias_wrong_units')
     assert_raises(YTFieldUnitParseError, get_data, ds,
@@ -256,11 +264,22 @@
     dens = ad['density_alias_auto']
     assert_equal(str(dens.units), 'g/cm**3')
 
-    ds.add_field(('gas','dimensionless'), function=unitless_data)
-    ds.add_field(('gas','dimensionless_auto'), function=unitless_data,
-                 units='auto', dimensions='dimensionless')
-    ds.add_field(('gas','dimensionless_explicit'), function=unitless_data, units='')
-    ds.add_field(('gas','dimensionful'), function=unitless_data, units='g/cm**3')
+    ds.add_field(('gas','dimensionless'),
+                 sampling_type='cell',
+                 function=unitless_data)
+    ds.add_field(('gas','dimensionless_auto'),
+                 function=unitless_data,
+                 sampling_type='cell',
+                 units='auto',
+                 dimensions='dimensionless')
+    ds.add_field(('gas','dimensionless_explicit'),
+                 function=unitless_data,
+                 sampling_type='cell',
+                 units='')
+    ds.add_field(('gas','dimensionful'),
+                 sampling_type='cell',
+                 function=unitless_data,
+                 units='g/cm**3')
 
     assert_equal(str(ad['dimensionless'].units), 'dimensionless')
     assert_equal(str(ad['dimensionless_auto'].units), 'dimensionless')
@@ -281,7 +300,8 @@
     def density_alias(field, data):
         return data['density']
 
-    ds.add_field('density_alias', function=density_alias, units='g/cm**3')
+    ds.add_field('density_alias', sampling_type='cell',
+                 function=density_alias, units='g/cm**3')
 
     ad['density_alias']
     assert ds.derived_field_list[0] == 'density_alias'
@@ -292,7 +312,8 @@
     def density_alias(field, data):
         return data['density']
 
-    ds.add_field('density_alias', function=density_alias, units='g/cm**3')
+    ds.add_field('density_alias', sampling_type='cell',
+                 function=density_alias, units='g/cm**3')
 
     ds.field_info['density_alias']
     ds.field_info['gas', 'density_alias']
@@ -302,8 +323,9 @@
     def pmass_alias(field, data):
         return data['particle_mass']
         
-    ds.add_field('particle_mass_alias', function=pmass_alias, 
-                 units='g', particle_type=True)
+    ds.add_field('particle_mass_alias', function=pmass_alias,
+                 sampling_type='particle',
+                 units='g')
 
     ds.field_info['particle_mass_alias']
     ds.field_info['all', 'particle_mass_alias']

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/fields/tests/test_magnetic_fields.py
--- a/yt/fields/tests/test_magnetic_fields.py
+++ b/yt/fields/tests/test_magnetic_fields.py
@@ -43,10 +43,10 @@
              dd2["magnetic_field_z"]**2)/(2.0*mu_0)
     emag2.convert_to_units("Pa")
 
-    yield assert_almost_equal, emag1, dd1["magnetic_energy"]
-    yield assert_almost_equal, emag2, dd2["magnetic_energy"]
+    assert_almost_equal(emag1, dd1["magnetic_energy"])
+    assert_almost_equal(emag2, dd2["magnetic_energy"])
 
     assert str(emag1.units) == str(dd1["magnetic_energy"].units)
     assert str(emag2.units) == str(dd2["magnetic_energy"].units)
 
-    yield assert_almost_equal, emag1.in_cgs(), emag2.in_cgs()
+    assert_almost_equal(emag1.in_cgs(), emag2.in_cgs())

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/art/tests/test_outputs.py
--- a/yt/frontends/art/tests/test_outputs.py
+++ b/yt/frontends/art/tests/test_outputs.py
@@ -41,7 +41,7 @@
 def test_d9p():
     ds = data_dir_load(d9p)
     ds.index
-    yield assert_equal, str(ds), "10MpcBox_HartGal_csf_a0.500.d"
+    assert_equal(str(ds), "10MpcBox_HartGal_csf_a0.500.d")
     dso = [None, ("sphere", ("max", (0.1, 'unitary')))]
     for field in _fields:
         for axis in [0, 1, 2]:
@@ -56,18 +56,18 @@
     ad = ds.all_data()
     # 'Ana' variable values output from the ART Fortran 'ANA' analysis code
     AnaNStars = 6255
-    yield assert_equal, ad[('stars', 'particle_type')].size, AnaNStars
-    yield assert_equal, ad[('specie4', 'particle_type')].size, AnaNStars
+    assert_equal(ad[('stars', 'particle_type')].size, AnaNStars)
+    assert_equal(ad[('specie4', 'particle_type')].size, AnaNStars)
 
     # The *real* answer is 2833405, but yt misses one particle since it lives
     # on a domain boundary. See issue 814. When that is fixed, this test
     # will need to be updated
     AnaNDM = 2833404
-    yield assert_equal, ad[('darkmatter', 'particle_type')].size, AnaNDM
-    yield assert_equal, (ad[('specie0', 'particle_type')].size +
-                         ad[('specie1', 'particle_type')].size +
-                         ad[('specie2', 'particle_type')].size +
-                         ad[('specie3', 'particle_type')].size), AnaNDM
+    assert_equal(ad[('darkmatter', 'particle_type')].size, AnaNDM)
+    assert_equal((ad[('specie0', 'particle_type')].size +
+                  ad[('specie1', 'particle_type')].size +
+                  ad[('specie2', 'particle_type')].size +
+                  ad[('specie3', 'particle_type')].size), AnaNDM)
 
     for spnum in range(5):
         npart_read = ad['specie%s' % spnum, 'particle_type'].size
@@ -81,34 +81,33 @@
     AnaVolume = YTQuantity(364.640074656, 'Mpc**3')
     Volume = 1
     for i in ds.domain_width.in_units('Mpc'):
-        yield assert_almost_equal, i, AnaBoxSize
+        assert_almost_equal(i, AnaBoxSize)
         Volume *= i
-    yield assert_almost_equal, Volume, AnaVolume
+    assert_almost_equal(Volume, AnaVolume)
 
     AnaNCells = 4087490
-    yield assert_equal, len(ad[('index', 'cell_volume')]), AnaNCells
+    assert_equal(len(ad[('index', 'cell_volume')]), AnaNCells)
 
     AnaTotDMMass = YTQuantity(1.01191786808255e+14, 'Msun')
-    yield (assert_almost_equal,
-           ad[('darkmatter', 'particle_mass')].sum().in_units('Msun'),
-           AnaTotDMMass)
+    assert_almost_equal(
+        ad[('darkmatter', 'particle_mass')].sum().in_units('Msun'),
+        AnaTotDMMass)
 
     AnaTotStarMass = YTQuantity(1776701.3990607238, 'Msun')
-    yield (assert_almost_equal,
-           ad[('stars', 'particle_mass')].sum().in_units('Msun'),
-           AnaTotStarMass)
+    assert_almost_equal(ad[('stars', 'particle_mass')].sum().in_units('Msun'),
+                        AnaTotStarMass)
 
     AnaTotStarMassInitial = YTQuantity(2423468.2801332865, 'Msun')
-    yield (assert_almost_equal,
-           ad[('stars', 'particle_mass_initial')].sum().in_units('Msun'),
-           AnaTotStarMassInitial)
+    assert_almost_equal(
+        ad[('stars', 'particle_mass_initial')].sum().in_units('Msun'),
+        AnaTotStarMassInitial)
 
     AnaTotGasMass = YTQuantity(1.7826982029216785e+13, 'Msun')
-    yield (assert_almost_equal, ad[('gas', 'cell_mass')].sum().in_units('Msun'),
-           AnaTotGasMass)
+    assert_almost_equal(ad[('gas', 'cell_mass')].sum().in_units('Msun'),
+                        AnaTotGasMass)
 
     AnaTotTemp = YTQuantity(150219844793.39072, 'K')  # just leaves
-    yield assert_equal, ad[('gas', 'temperature')].sum(), AnaTotTemp
+    assert_equal(ad[('gas', 'temperature')].sum(), AnaTotTemp)
 
 
 @requires_file(d9p)
@@ -117,5 +116,4 @@
 
 @requires_file(d9p)
 def test_units_override():
-    for test in units_override_check(d9p):
-        yield test
+    units_override_check(d9p)
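
The same change repeats across the frontend test modules below: units_override_check no longer yields per-attribute assertion tuples, so each test_units_override collapses from a loop that re-yields them into a single call that runs the assertions itself:

    # Before
    @requires_file(d9p)
    def test_units_override():
        for test in units_override_check(d9p):
            yield test

    # After
    @requires_file(d9p)
    def test_units_override():
        units_override_check(d9p)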

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -34,7 +34,7 @@
 def test_sizmbhloz():
     ds = data_dir_load(sizmbhloz)
     ds.max_range = 1024*1024
-    yield assert_equal, str(ds), "sizmbhloz-clref04SNth-rs9_a0.9011.art"
+    assert_equal(str(ds), "sizmbhloz-clref04SNth-rs9_a0.9011.art")
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
     for dobj_name in dso:
         for field in _fields:
@@ -47,7 +47,7 @@
         dobj = create_obj(ds, dobj_name)
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
-        yield assert_equal, s1, s2
+        assert_equal(s1, s2)
     assert_equal(ds.particle_type_counts, {'N-BODY': 100000, 'STAR': 110650})
 
 
@@ -57,5 +57,4 @@
 
 @requires_file(sizmbhloz)
 def test_units_override():
-    for test in units_override_check(sizmbhloz):
-        yield test
+    units_override_check(sizmbhloz)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -33,7 +33,7 @@
 @requires_ds(cloud)
 def test_cloud():
     ds = data_dir_load(cloud)
-    yield assert_equal, str(ds), "Cloud.0050"
+    assert_equal(str(ds), "Cloud.0050")
     for test in small_patch_amr(ds, _fields_cloud):
         test_cloud.__name__ = test.description
         yield test
@@ -44,7 +44,7 @@
 @requires_ds(blast)
 def test_blast():
     ds = data_dir_load(blast)
-    yield assert_equal, str(ds), "Blast.0100"
+    assert_equal(str(ds), "Blast.0100")
     for test in small_patch_amr(ds, _fields_blast):
         test_blast.__name__ = test.description
         yield test
@@ -73,7 +73,7 @@
 @requires_ds(stripping, big_data=True)
 def test_stripping():
     ds = data_dir_load(stripping, kwargs={"units_override":uo_stripping})
-    yield assert_equal, str(ds), "rps.0062"
+    assert_equal(str(ds), "rps.0062")
     for test in small_patch_amr(ds, _fields_stripping):
         test_stripping.__name__ = test.description
         yield test
@@ -100,12 +100,16 @@
     assert_equal(ds3.time_unit, u.Myr)
     assert_equal(ds3.mass_unit, 1e14*u.Msun)
 
-    yield assert_equal, sp1.quantities.extrema("pressure"), sp2.quantities.extrema("pressure")
-    yield assert_allclose_units, sp1.quantities.total_quantity("pressure"), sp2.quantities.total_quantity("pressure")
+    assert_equal(sp1.quantities.extrema("pressure"),
+                 sp2.quantities.extrema("pressure"))
+    assert_allclose_units(sp1.quantities.total_quantity("pressure"),
+                          sp2.quantities.total_quantity("pressure"))
     for ax in "xyz":
-        yield assert_equal, sp1.quantities.extrema("velocity_%s" % ax), sp2.quantities.extrema("velocity_%s" % ax)
-    yield assert_allclose_units, sp1.quantities.bulk_velocity(), sp2.quantities.bulk_velocity()
-    yield assert_equal, prj1["density"], prj2["density"]
+        assert_equal(sp1.quantities.extrema("velocity_%s" % ax),
+                     sp2.quantities.extrema("velocity_%s" % ax))
+    assert_allclose_units(sp1.quantities.bulk_velocity(),
+                          sp2.quantities.bulk_velocity())
+    assert_equal(prj1["density"], prj2["density"])
 
     ytcfg["yt","skip_dataset_cache"] = "False"
 

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/athena_pp/tests/test_outputs.py
--- a/yt/frontends/athena_pp/tests/test_outputs.py
+++ b/yt/frontends/athena_pp/tests/test_outputs.py
@@ -71,8 +71,7 @@
 
 @requires_file(AM06)
 def test_units_override():
-    for test in units_override_check(AM06):
-        yield test
+    units_override_check(AM06)
 
 @requires_file(AM06)
 def test_AthenaPPDataset():

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/boxlib/tests/test_outputs.py
--- a/yt/frontends/boxlib/tests/test_outputs.py
+++ b/yt/frontends/boxlib/tests/test_outputs.py
@@ -39,7 +39,7 @@
 @requires_ds(radadvect)
 def test_radadvect():
     ds = data_dir_load(radadvect)
-    yield assert_equal, str(ds), "plt00000"
+    assert_equal(str(ds), "plt00000")
     for test in small_patch_amr(ds, _orion_fields):
         test_radadvect.__name__ = test.description
         yield test
@@ -48,7 +48,7 @@
 @requires_ds(rt)
 def test_radtube():
     ds = data_dir_load(rt)
-    yield assert_equal, str(ds), "plt00500"
+    assert_equal(str(ds), "plt00500")
     for test in small_patch_amr(ds, _orion_fields):
         test_radtube.__name__ = test.description
         yield test
@@ -57,7 +57,7 @@
 @requires_ds(star)
 def test_star():
     ds = data_dir_load(star)
-    yield assert_equal, str(ds), "plrd01000"
+    assert_equal(str(ds), "plrd01000")
     for test in small_patch_amr(ds, _orion_fields):
         test_star.__name__ = test.description
         yield test
@@ -66,7 +66,7 @@
 @requires_ds(LyA)
 def test_LyA():
     ds = data_dir_load(LyA)
-    yield assert_equal, str(ds), "plt00000"
+    assert_equal(str(ds), "plt00000")
     for test in small_patch_amr(ds, _nyx_fields,
                                 input_center="c",
                                 input_weight="Ne"):
@@ -110,7 +110,7 @@
 @requires_ds(RT_particles)
 def test_RT_particles():
     ds = data_dir_load(RT_particles)
-    yield assert_equal, str(ds), "plt00050"
+    assert_equal(str(ds), "plt00050")
     for test in small_patch_amr(ds, _castro_fields):
         test_RT_particles.__name__ = test.description
         yield test
@@ -148,7 +148,7 @@
 @requires_ds(langmuir)
 def test_langmuir():
     ds = data_dir_load(langmuir)
-    yield assert_equal, str(ds), "plt00020_v2"
+    assert_equal(str(ds), "plt00020_v2")
     for test in small_patch_amr(ds, _warpx_fields, 
                                 input_center="c",
                                 input_weight="Ex"):
@@ -159,7 +159,7 @@
 @requires_ds(plasma)
 def test_plasma():
     ds = data_dir_load(plasma)
-    yield assert_equal, str(ds), "plt00030_v2"
+    assert_equal(str(ds), "plt00030_v2")
     for test in small_patch_amr(ds, _warpx_fields,
                                 input_center="c",
                                 input_weight="Ex"):
@@ -223,8 +223,7 @@
 
 @requires_file(rt)
 def test_units_override():
-    for test in units_override_check(rt):
-        yield test
+    units_override_check(rt)
 
 nyx_no_particles = "nyx_sedov_plt00086"
 @requires_file(nyx_no_particles)
@@ -247,5 +246,3 @@
 
     ds = data_dir_load(nyx_no_particles)
     assert_equal(sorted(ds.field_list), fields)
-
-

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -33,7 +33,7 @@
 @requires_ds(gc)
 def test_gc():
     ds = data_dir_load(gc)
-    yield assert_equal, str(ds), "data.0077.3d.hdf5"
+    assert_equal(str(ds), "data.0077.3d.hdf5")
     for test in small_patch_amr(ds, _fields):
         test_gc.__name__ = test.description
         yield test
@@ -42,7 +42,7 @@
 @requires_ds(tb)
 def test_tb():
     ds = data_dir_load(tb)
-    yield assert_equal, str(ds), "data.0005.3d.hdf5"
+    assert_equal(str(ds), "data.0005.3d.hdf5")
     for test in small_patch_amr(ds, _fields):
         test_tb.__name__ = test.description
         yield test
@@ -51,7 +51,7 @@
 @requires_ds(iso)
 def test_iso():
     ds = data_dir_load(iso)
-    yield assert_equal, str(ds), "data.0000.3d.hdf5"
+    assert_equal(str(ds), "data.0000.3d.hdf5")
     for test in small_patch_amr(ds, _fields):
         test_iso.__name__ = test.description
         yield test
@@ -61,7 +61,7 @@
 @requires_ds(zp)
 def test_zp():
     ds = data_dir_load(zp)
-    yield assert_equal, str(ds), "plt32.2d.hdf5"
+    assert_equal(str(ds), "plt32.2d.hdf5")
     for test in small_patch_amr(ds, _zp_fields, input_center="c",
                                 input_weight="rhs"):
         test_zp.__name__ = test.description
@@ -71,7 +71,7 @@
 @requires_ds(kho)
 def test_kho():
     ds = data_dir_load(kho)
-    yield assert_equal, str(ds), "data.0004.hdf5"
+    assert_equal(str(ds), "data.0004.hdf5")
     for test in small_patch_amr(ds, _fields):
         test_kho.__name__ = test.description
         yield test
@@ -92,15 +92,12 @@
 
 @requires_file(zp)
 def test_units_override_zp():
-    for test in units_override_check(zp):
-        yield test
+    units_override_check(zp)
 
 @requires_file(gc)
 def test_units_override_gc():
-    for test in units_override_check(gc):
-        yield test
+    units_override_check(gc)
 
 @requires_file(kho)
 def test_units_override_kho():
-    for test in units_override_check(kho):
-        yield test
+    units_override_check(kho)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -71,7 +71,7 @@
 @requires_ds(m7)
 def test_moving7():
     ds = data_dir_load(m7)
-    yield assert_equal, str(ds), "moving7_0010"
+    assert_equal(str(ds), "moving7_0010")
     for test in small_patch_amr(m7, _fields):
         test_moving7.__name__ = test.description
         yield test
@@ -80,7 +80,7 @@
 def test_galaxy0030():
     ds = data_dir_load(g30)
     yield check_color_conservation(ds)
-    yield assert_equal, str(ds), "galaxy0030"
+    assert_equal(str(ds), "galaxy0030")
     for test in big_patch_amr(ds, _fields):
         test_galaxy0030.__name__ = test.description
         yield test
@@ -122,18 +122,17 @@
 
 @requires_file(enzotiny)
 def test_units_override():
-    for test in units_override_check(enzotiny):
-        yield test
+    units_override_check(enzotiny)
 
 @requires_ds(ecp, big_data=True)
 def test_nuclei_density_fields():
     ds = data_dir_load(ecp)
     ad = ds.all_data()
-    yield assert_array_equal, ad["H_nuclei_density"], \
-      (ad["H_number_density"] + ad["H_p1_number_density"])
-    yield assert_array_equal, ad["He_nuclei_density"], \
-      (ad["He_number_density"] + ad["He_p1_number_density"] +
-       ad["He_p2_number_density"])
+    assert_array_equal(ad["H_nuclei_density"],
+                       (ad["H_number_density"] + ad["H_p1_number_density"]))
+    assert_array_equal(ad["He_nuclei_density"],
+        (ad["He_number_density"] +
+         ad["He_p1_number_density"] + ad["He_p2_number_density"]))
 
 @requires_file(enzotiny)
 def test_EnzoDataset():

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/exodus_ii/tests/test_outputs.py
--- a/yt/frontends/exodus_ii/tests/test_outputs.py
+++ b/yt/frontends/exodus_ii/tests/test_outputs.py
@@ -41,12 +41,12 @@
                   ('connect2', 'conv_marker'),
                   ('connect2', 'convected'),
                   ('connect2', 'diffused')]
-    yield assert_equal, str(ds), "out.e"
-    yield assert_equal, ds.dimensionality, 3
-    yield assert_equal, ds.current_time, 0.0
-    yield assert_array_equal, ds.parameters['nod_names'], ['convected', 'diffused']
-    yield assert_equal, ds.parameters['num_meshes'], 2
-    yield assert_array_equal, ds.field_list, field_list
+    assert_equal(str(ds), "out.e")
+    assert_equal(ds.dimensionality, 3)
+    assert_equal(ds.current_time, 0.0)
+    assert_array_equal(ds.parameters['nod_names'], ['convected', 'diffused'])
+    assert_equal(ds.parameters['num_meshes'], 2)
+    assert_array_equal(ds.field_list, field_list)
 
 out_s002 = "ExodusII/out.e-s002"
 
@@ -66,10 +66,10 @@
                   ('connect2', 'conv_marker'),
                   ('connect2', 'convected'),
                   ('connect2', 'diffused')]
-    yield assert_equal, str(ds), "out.e-s002"
-    yield assert_equal, ds.dimensionality, 3
-    yield assert_equal, ds.current_time, 2.0
-    yield assert_array_equal, ds.field_list, field_list
+    assert_equal(str(ds), "out.e-s002")
+    assert_equal(ds.dimensionality, 3)
+    assert_equal(ds.current_time, 2.0)
+    assert_array_equal(ds.field_list, field_list)
 
 gold = "ExodusII/gold.e"
 
@@ -78,8 +78,8 @@
 def test_gold():
     ds = data_dir_load(gold)
     field_list = [('all', 'forced'), ('connect1', 'forced')]
-    yield assert_equal, str(ds), "gold.e"
-    yield assert_array_equal, ds.field_list, field_list
+    assert_equal(str(ds), "gold.e")
+    assert_array_equal(ds.field_list, field_list)
 
 big_data = "MOOSE_sample_data/mps_out.e"
 

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/fits/tests/test_outputs.py
--- a/yt/frontends/fits/tests/test_outputs.py
+++ b/yt/frontends/fits/tests/test_outputs.py
@@ -29,7 +29,7 @@
 @requires_ds(grs)
 def test_grs():
     ds = data_dir_load(grs, cls=FITSDataset, kwargs={"nan_mask":0.0})
-    yield assert_equal, str(ds), "grs-50-cube.fits"
+    assert_equal(str(ds), "grs-50-cube.fits")
     for test in small_patch_amr(ds, _fields_grs, input_center="c", input_weight="ones"):
         test_grs.__name__ = test.description
         yield test
@@ -40,15 +40,14 @@
 @requires_ds(vf)
 def test_velocity_field():
     ds = data_dir_load(vf, cls=FITSDataset)
-    yield assert_equal, str(ds), "velocity_field_20.fits"
+    assert_equal(str(ds), "velocity_field_20.fits")
     for test in small_patch_amr(ds, _fields_vels, input_center="c", input_weight="ones"):
         test_velocity_field.__name__ = test.description
         yield test
 
 @requires_file(vf)
 def test_units_override():
-    for test in units_override_check(vf):
-        yield test
+    units_override_check(vf)
 
 @requires_file(grs)
 def test_FITSDataset():

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -33,7 +33,7 @@
 @requires_ds(sloshing, big_data=True)
 def test_sloshing():
     ds = data_dir_load(sloshing)
-    yield assert_equal, str(ds), "sloshing_low_res_hdf5_plt_cnt_0300"
+    assert_equal(str(ds), "sloshing_low_res_hdf5_plt_cnt_0300")
     for test in small_patch_amr(ds, _fields):
         test_sloshing.__name__ = test.description
         yield test
@@ -44,7 +44,7 @@
 @requires_ds(wt)
 def test_wind_tunnel():
     ds = data_dir_load(wt)
-    yield assert_equal, str(ds), "windtunnel_4lev_hdf5_plt_cnt_0030"
+    assert_equal(str(ds), "windtunnel_4lev_hdf5_plt_cnt_0030")
     for test in small_patch_amr(ds, _fields_2d):
         test_wind_tunnel.__name__ = test.description
         yield test
@@ -55,8 +55,7 @@
 
 @requires_file(sloshing)
 def test_units_override():
-    for test in units_override_check(sloshing):
-        yield test
+    units_override_check(sloshing)
 
 fid_1to3_b1 = "fiducial_1to3_b1/fiducial_1to3_b1_hdf5_part_0080"
 

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -69,7 +69,7 @@
 
     # Test that all subhalo particles are contained within
     # their parent group.
-    yield assert_equal, total_sub, total_int
+    assert_equal(total_sub, total_int)
 
 @requires_file(g298)
 def test_halo_masses():
@@ -85,7 +85,7 @@
         # Check that masses from halo containers are the same
         # as the array of all masses.  This will test getting
         # scalar fields for halos correctly.
-        yield assert_array_equal, ad[ptype, "particle_mass"], mass
+        assert_array_equal(ad[ptype, "particle_mass"], mass)
 
 # fof/subhalo catalog with no member ids in first file
 g56 = "gadget_halos/data/groups_056/fof_subhalo_tab_056.0.hdf5"

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/gamer/tests/test_outputs.py
--- a/yt/frontends/gamer/tests/test_outputs.py
+++ b/yt/frontends/gamer/tests/test_outputs.py
@@ -34,7 +34,7 @@
 @requires_ds(jet, big_data=True)
 def test_jet():
     ds = data_dir_load(jet, kwargs={"units_override":jet_units})
-    yield assert_equal, str(ds), "jet_000002"
+    assert_equal(str(ds), "jet_000002")
     for test in small_patch_amr(ds, _fields_jet):
         test_jet.__name__ = test.description
         yield test
@@ -46,7 +46,7 @@
 @requires_ds(psiDM, big_data=True)
 def test_psiDM():
     ds = data_dir_load(psiDM)
-    yield assert_equal, str(ds), "psiDM_000020"
+    assert_equal(str(ds), "psiDM_000020")
     for test in small_patch_amr(ds, _fields_psiDM):
         test_psiDM.__name__ = test.description
         yield test
@@ -58,7 +58,7 @@
 @requires_ds(plummer, big_data=True)
 def test_plummer():
     ds = data_dir_load(plummer)
-    yield assert_equal, str(ds), "plummer_000000"
+    assert_equal(str(ds), "plummer_000000")
     for test in small_patch_amr(ds, _fields_plummer):
         test_plummer.__name__ = test.description
         yield test
@@ -71,5 +71,4 @@
 
 @requires_file(jet)
 def test_units_override():
-    for test in units_override_check(jet):
-        yield test
+    units_override_check(jet)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/gdf/tests/test_outputs.py
--- a/yt/frontends/gdf/tests/test_outputs.py
+++ b/yt/frontends/gdf/tests/test_outputs.py
@@ -30,7 +30,7 @@
 @requires_ds(sedov)
 def test_sedov_tunnel():
     ds = data_dir_load(sedov)
-    yield assert_equal, str(ds), "sedov_tst_0004"
+    assert_equal(str(ds), "sedov_tst_0004")
     for test in small_patch_amr(ds, _fields):
         test_sedov_tunnel.__name__ = test.description
         yield test
@@ -43,5 +43,4 @@
 
 @requires_file(sedov)
 def test_units_override():
-    for test in units_override_check(sedov):
-        yield test
+    units_override_check(sedov)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/moab/tests/test_c5.py
--- a/yt/frontends/moab/tests/test_c5.py
+++ b/yt/frontends/moab/tests/test_c5.py
@@ -35,26 +35,26 @@
 def test_cantor_5():
     np.random.seed(0x4d3d3d3)
     ds = data_dir_load(c5)
-    yield assert_equal, str(ds), "c5"
+    assert_equal(str(ds), "c5")
     dso = [ None, ("sphere", ("c", (0.1, 'unitary'))),
                   ("sphere", ("c", (0.2, 'unitary')))]
     dd = ds.all_data()
-    yield assert_almost_equal, ds.index.get_smallest_dx(), 0.00411522633744843, 10
-    yield assert_equal, dd["x"].shape[0], 63*63*63
-    yield assert_almost_equal, \
-        dd["cell_volume"].in_units("code_length**3").sum(dtype="float64").d, \
-        1.0, 10
+    assert_almost_equal(ds.index.get_smallest_dx(), 0.00411522633744843, 10)
+    assert_equal(dd["x"].shape[0], 63*63*63)
+    assert_almost_equal(
+        dd["cell_volume"].in_units("code_length**3").sum(dtype="float64").d,
+        1.0, 10)
     for offset_1 in [1e-9, 1e-4, 0.1]:
         for offset_2 in [1e-9, 1e-4, 0.1]:
             DLE = ds.domain_left_edge
             DRE = ds.domain_right_edge
             ray = ds.ray(DLE + offset_1 * DLE.uq,
                          DRE - offset_2 * DRE.uq)
-            yield assert_almost_equal, ray["dts"].sum(dtype="float64"), 1.0, 8
+            assert_almost_equal(ray["dts"].sum(dtype="float64"), 1.0, 8)
     for i, p1 in enumerate(np.random.random((5, 3))):
         for j, p2 in enumerate(np.random.random((5, 3))):
             ray = ds.ray(p1, p2)
-            yield assert_almost_equal, ray["dts"].sum(dtype="float64"), 1.0, 8
+            assert_almost_equal(ray["dts"].sum(dtype="float64"), 1.0, 8)
     for field in _fields:
         for dobj_name in dso:
             yield FieldValuesTest(c5, field, dobj_name)
@@ -66,5 +66,4 @@
 
 @requires_file(c5)
 def test_units_override():
-    for test in units_override_check(c5):
-        yield test
+    units_override_check(c5)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/open_pmd/tests/test_outputs.py
--- a/yt/frontends/open_pmd/tests/test_outputs.py
+++ b/yt/frontends/open_pmd/tests/test_outputs.py
@@ -64,14 +64,16 @@
     domain_width = [2.08e-05, 2.08e-05, 2.01e-05] * np.ones_like(ds.domain_left_edge)
 
     assert isinstance(ds, OpenPMDDataset)
-    yield assert_equal, str(ds), "data00000100.h5"
-    yield assert_equal, ds.dimensionality, 3
-    yield assert_equal, ds.particle_types_raw, ('io',)
+    assert_equal(str(ds), "data00000100.h5")
+    assert_equal(ds.dimensionality, 3)
+    assert_equal(ds.particle_types_raw, ('io',))
     assert "all" in ds.particle_unions
-    yield assert_array_equal, ds.field_list, field_list
-    yield assert_array_equal, ds.domain_dimensions, domain_dimensions
-    yield assert_almost_equal, ds.current_time, 3.28471214521e-14 * np.ones_like(ds.current_time)
-    yield assert_almost_equal, ds.domain_right_edge - ds.domain_left_edge, domain_width
+    assert_array_equal(ds.field_list, field_list)
+    assert_array_equal(ds.domain_dimensions, domain_dimensions)
+    assert_almost_equal(ds.current_time,
+                        3.28471214521e-14 * np.ones_like(ds.current_time))
+    assert_almost_equal(ds.domain_right_edge - ds.domain_left_edge,
+                        domain_width)
 
 
 @requires_file(twoD)
@@ -127,11 +129,13 @@
     domain_width = [3.06e-05, 2.01e-05, 1e+0] * np.ones_like(ds.domain_left_edge)
 
     assert isinstance(ds, OpenPMDDataset)
-    yield assert_equal, str(ds), "data00000100.h5"
-    yield assert_equal, ds.dimensionality, 2
-    yield assert_equal, ds.particle_types_raw, ('Hydrogen1+', 'electrons')
+    assert_equal(str(ds), "data00000100.h5")
+    assert_equal(ds.dimensionality, 2)
+    assert_equal(ds.particle_types_raw, ('Hydrogen1+', 'electrons'))
     assert "all" in ds.particle_unions
-    yield assert_array_equal, ds.field_list, field_list
-    yield assert_array_equal, ds.domain_dimensions, domain_dimensions
-    yield assert_almost_equal, ds.current_time, 3.29025596712e-14 * np.ones_like(ds.current_time)
-    yield assert_almost_equal, ds.domain_right_edge - ds.domain_left_edge, domain_width
+    assert_array_equal(ds.field_list, field_list)
+    assert_array_equal(ds.domain_dimensions, domain_dimensions)
+    assert_almost_equal(ds.current_time,
+                        3.29025596712e-14 * np.ones_like(ds.current_time))
+    assert_almost_equal(ds.domain_right_edge - ds.domain_left_edge,
+                        domain_width)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/owls_subfind/tests/test_outputs.py
--- a/yt/frontends/owls_subfind/tests/test_outputs.py
+++ b/yt/frontends/owls_subfind/tests/test_outputs.py
@@ -34,7 +34,7 @@
 @requires_ds(g8)
 def test_fields_g8():
     ds = data_dir_load(g8)
-    yield assert_equal, str(ds), os.path.basename(g8)
+    assert_equal(str(ds), os.path.basename(g8))
     for field in _fields:
         yield FieldValuesTest(g8, field, particle_type=True)
 
@@ -42,7 +42,7 @@
 @requires_ds(g1)
 def test_fields_g1():
     ds = data_dir_load(g1)
-    yield assert_equal, str(ds), os.path.basename(g1)
+    assert_equal(str(ds), os.path.basename(g1))
     for field in _fields:
         yield FieldValuesTest(g1, field, particle_type=True)
 

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -35,7 +35,7 @@
 @requires_ds(output_00080)
 def test_output_00080():
     ds = data_dir_load(output_00080)
-    yield assert_equal, str(ds), "info_00080"
+    assert_equal(str(ds), "info_00080")
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
     for dobj_name in dso:
         for field in _fields:
@@ -48,7 +48,7 @@
         dobj = create_obj(ds, dobj_name)
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
-        yield assert_equal, s1, s2
+        assert_equal(s1, s2)
     assert_equal(ds.particle_type_counts, {'io': 1090895})
 
 @requires_file(output_00080)
@@ -57,8 +57,7 @@
 
 @requires_file(output_00080)
 def test_units_override():
-    for test in units_override_check(output_00080):
-        yield test
+    units_override_check(output_00080)
 
 
 ramsesNonCosmo = 'DICEGalaxyDisk_nonCosmological/output_00002'
@@ -67,7 +66,7 @@
     ds = yt.load(os.path.join(ramsesNonCosmo, 'info_00002.txt'))
 
     expected_raw_time = 0.0299468077820411 # in ramses unit
-    yield assert_equal, ds.current_time.value, expected_raw_time
+    assert_equal(ds.current_time.value, expected_raw_time)
 
     expected_time = 14087886140997.336 # in seconds
     assert_equal(ds.current_time.in_units('s').value, expected_time)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/rockstar/tests/test_outputs.py
--- a/yt/frontends/rockstar/tests/test_outputs.py
+++ b/yt/frontends/rockstar/tests/test_outputs.py
@@ -31,7 +31,7 @@
 @requires_ds(r1)
 def test_fields_r1():
     ds = data_dir_load(r1)
-    yield assert_equal, str(ds), os.path.basename(r1)
+    assert_equal(str(ds), os.path.basename(r1))
     for field in _fields:
         yield FieldValuesTest(r1, field, particle_type=True)
 

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/sdf/tests/test_outputs.py
--- a/yt/frontends/sdf/tests/test_outputs.py
+++ b/yt/frontends/sdf/tests/test_outputs.py
@@ -41,7 +41,7 @@
     if not internet_on():
         return
     ds = SDFDataset(scivis_data)
-    yield assert_equal, str(ds), "ds14_scivis_0128_e4_dt04_1.0000"
+    assert_equal(str(ds), "ds14_scivis_0128_e4_dt04_1.0000")
     ad = ds.all_data()
     assert np.unique(ad['particle_position_x']).size > 1
     ProjectionPlot(ds, "z", _fields)

diff -r a5836030bd53a53f9873e433566b6b691939752e -r 416bc87fd064d8cd5d64a98922c00c1cc71a0f7d yt/frontends/stream/tests/test_stream_amrgrids.py
--- a/yt/frontends/stream/tests/test_stream_amrgrids.py
+++ b/yt/frontends/stream/tests/test_stream_amrgrids.py
@@ -26,7 +26,7 @@
     def make_proj():
         p = ProjectionPlot(spf, 'x', ["density"], center='c', origin='native')
         return p
-    yield assert_raises, YTIntDomainOverflow, make_proj
+    assert_raises(YTIntDomainOverflow, make_proj)
 
 def test_refine_by():
     grid_data = []
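
assert_raises gets the same treatment as the other helpers: the yielded (assert_raises, exception, callable) tuple becomes a direct call. For reference, numpy.testing's assert_raises also works as a context manager when given only the exception class, which can read more naturally when the failing call takes arguments; a small sketch assuming the make_proj closure above:

    # Direct call, as in this diff:
    assert_raises(YTIntDomainOverflow, make_proj)

    # Equivalent context-manager form:
    with assert_raises(YTIntDomainOverflow):
        make_proj()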

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/
