[Yt-svn] commit/yt: 6 new changesets

Bitbucket commits-noreply at bitbucket.org
Wed May 18 08:48:09 PDT 2011


6 new changesets in yt:

http://bitbucket.org/yt_analysis/yt/changeset/447d77bb79c1/
changeset:   r4266:447d77bb79c1
branch:      yt
user:        MatthewTurk
date:        2011-05-10 09:44:02
summary:     Re-organizing tests and getting rid of old, non-working test code
affected #:  12 files (5.2 KB)

--- a/tests/answer_tests/fields_to_test.py	Mon May 09 22:42:45 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,9 +0,0 @@
-# We want to test several things.  We need to be able to run the 
-
-field_list = ["Density", "Temperature", "x-velocity", "y-velocity", "z-velocity",
-                  # Now some derived fields
-                  "Pressure", "SoundSpeed", "particle_density", "Entropy",
-                  # Ghost zones
-                  "AveragedDensity", "DivV"]
-
-particle_field_list = ["particle_position_x", "ParticleMassMsun"]


--- a/tests/answer_tests/hierarchy_consistency.py	Mon May 09 22:42:45 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTStaticOutputTest, RegressionTestException
-from yt.funcs import ensure_list
-
-class HierarchyInconsistent(RegressionTestException):
-    pass
-
-class HierarchyConsistency(YTStaticOutputTest):
-    name = "hierarchy_consistency"
-    def run(self):
-        self.result = \
-            all( g in ensure_list(c.Parent) for g in self.pf.h.grids
-                                            for c in g.Children )
-
-    def compare(self, old_result):
-        if not(old_result and self.result): raise HierarchyInconsistent()
-
-class GridLocationsProperties(YTStaticOutputTest):
-    name = "level_consistency"
-    def run(self):
-        self.result = dict(grid_left_edge = self.pf.h.grid_left_edge,
-                           grid_right_edge = self.pf.h.grid_right_edge,
-                           grid_levels = self.pf.h.grid_levels,
-                           grid_particle_count = self.pf.h.grid_particle_count,
-                           grid_dimensions = self.pf.h.grid_dimensions)
-
-    def compare(self, old_result):
-        # We allow now difference between these values
-        self.compare_data_arrays(self.result, old_result, 0.0)
-
-class GridRelationshipsChanged(RegressionTestException):
-    pass
-
-class GridRelationships(YTStaticOutputTest):
-
-    name = "grid_relationships"
-    def run(self):
-        self.result = [ [p.id for p in ensure_list(g.Parent) if g.Parent is not None]
-                        for g in self.pf.h.grids ]
-
-    def compare(self, old_result):
-        if len(old_result) != len(self.result):
-            raise GridRelationshipsChanged()
-        for plist1, plist2 in zip(old_result, self.result):
-            if len(plist1) != len(plist2): raise GridRelationshipsChanged()
-            if not all( (p1 == p2 for p1, p2 in zip(plist1, plist2) ) ):
-                raise GridRelationshipsChanged()
-
-class GridGlobalIndices(YTStaticOutputTest):
-    name = "global_startindex"
-
-    def run(self):
-        self.result = na.array([g.get_global_startindex()
-                                for g in self.pf.h.grids])
-
-    def compare(self, old_result):
-        self.compare_array_delta(old_result, self.result, 0.0)


--- a/tests/answer_tests/object_field_values.py	Mon May 09 22:42:45 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-import hashlib
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTStaticOutputTest, RegressionTestException, create_test
-from yt.funcs import ensure_list
-from fields_to_test import field_list, particle_field_list
-
-class FieldHashesDontMatch(RegressionTestException):
-    pass
-
-class YTFieldValuesTest(YTStaticOutputTest):
-    def run(self):
-        vals = self.data_object[self.field].copy()
-        vals.sort()
-        self.result = hashlib.sha256(vals.tostring()).hexdigest()
-
-    def compare(self, old_result):
-        if self.result != old_result: raise FieldHashesDontMatch
-
-class CenteredSphere(YTFieldValuesTest):
-
-    def setup(self):
-        YTFieldValuesTest.setup(self)
-        center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
-        width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-        self.data_object = self.pf.h.sphere(center, width/0.25)
-
-for field in field_list + particle_field_list:
-    create_test(CenteredSphere, "centered_sphere_%s" % (field), field = field)
-
-class OffCenteredSphere(YTFieldValuesTest):
-
-    def setup(self):
-        YTFieldValuesTest.setup(self)
-        center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
-        width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-        self.data_object = self.pf.h.sphere(center - 0.25 * width, width/0.25)
-
-for field in field_list + particle_field_list:
-    create_test(OffCenteredSphere, "off_centered_sphere_%s" % (field), field = field)
-
-class CornerSphere(YTFieldValuesTest):
-
-    def setup(self):
-        YTFieldValuesTest.setup(self)
-        width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-        self.data_object = self.pf.h.sphere(self.pf.domain_left_edge, width/0.25)
-
-for field in field_list + particle_field_list:
-    create_test(CornerSphere, "corner_sphere_%s" % (field), field = field)
-
-class AllData(YTFieldValuesTest):
-    def setup(self):
-        YTFieldValuesTest.setup(self)
-        self.data_object = self.pf.h.all_data()
-
-for field in field_list + particle_field_list:
-    create_test(AllData, "all_data_%s" % (field), field = field)


--- a/tests/answer_tests/projections.py	Mon May 09 22:42:45 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,16 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestGasDistribution
-from fields_to_test import field_list
-
-for axis in range(3):
-    for field in field_list:
-        create_test(TestProjection, "projection_test_%s_%s" % (axis, field),
-                    field = field, axis = axis)
-
-for field in field_list:
-    create_test(TestGasDistribution, "profile_density_test_%s" % field,
-                field_x = "Density", field_y = field)
-
-


--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/fields_to_test.py	Tue May 10 03:44:02 2011 -0400
@@ -0,0 +1,9 @@
+# We want to test several things.  We need to be able to run the 
+
+field_list = ["Density", "Temperature", "x-velocity", "y-velocity", "z-velocity",
+                  # Now some derived fields
+                  "Pressure", "SoundSpeed", "particle_density", "Entropy",
+                  # Ghost zones
+                  "AveragedDensity", "DivV"]
+
+particle_field_list = ["particle_position_x", "ParticleMassMsun"]


--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/hierarchy_consistency.py	Tue May 10 03:44:02 2011 -0400
@@ -0,0 +1,59 @@
+import numpy as na
+
+from yt.utilities.answer_testing.output_tests import \
+    YTStaticOutputTest, RegressionTestException
+from yt.funcs import ensure_list
+
+class HierarchyInconsistent(RegressionTestException):
+    pass
+
+class HierarchyConsistency(YTStaticOutputTest):
+    name = "hierarchy_consistency"
+    def run(self):
+        self.result = \
+            all( g in ensure_list(c.Parent) for g in self.pf.h.grids
+                                            for c in g.Children )
+
+    def compare(self, old_result):
+        if not(old_result and self.result): raise HierarchyInconsistent()
+
+class GridLocationsProperties(YTStaticOutputTest):
+    name = "level_consistency"
+    def run(self):
+        self.result = dict(grid_left_edge = self.pf.h.grid_left_edge,
+                           grid_right_edge = self.pf.h.grid_right_edge,
+                           grid_levels = self.pf.h.grid_levels,
+                           grid_particle_count = self.pf.h.grid_particle_count,
+                           grid_dimensions = self.pf.h.grid_dimensions)
+
+    def compare(self, old_result):
+        # We allow now difference between these values
+        self.compare_data_arrays(self.result, old_result, 0.0)
+
+class GridRelationshipsChanged(RegressionTestException):
+    pass
+
+class GridRelationships(YTStaticOutputTest):
+
+    name = "grid_relationships"
+    def run(self):
+        self.result = [ [p.id for p in ensure_list(g.Parent) if g.Parent is not None]
+                        for g in self.pf.h.grids ]
+
+    def compare(self, old_result):
+        if len(old_result) != len(self.result):
+            raise GridRelationshipsChanged()
+        for plist1, plist2 in zip(old_result, self.result):
+            if len(plist1) != len(plist2): raise GridRelationshipsChanged()
+            if not all( (p1 == p2 for p1, p2 in zip(plist1, plist2) ) ):
+                raise GridRelationshipsChanged()
+
+class GridGlobalIndices(YTStaticOutputTest):
+    name = "global_startindex"
+
+    def run(self):
+        self.result = na.array([g.get_global_startindex()
+                                for g in self.pf.h.grids])
+
+    def compare(self, old_result):
+        self.compare_array_delta(old_result, self.result, 0.0)


--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/object_field_values.py	Tue May 10 03:44:02 2011 -0400
@@ -0,0 +1,59 @@
+import hashlib
+import numpy as na
+
+from yt.utilities.answer_testing.output_tests import \
+    YTStaticOutputTest, RegressionTestException, create_test
+from yt.funcs import ensure_list
+from fields_to_test import field_list, particle_field_list
+
+class FieldHashesDontMatch(RegressionTestException):
+    pass
+
+class YTFieldValuesTest(YTStaticOutputTest):
+    def run(self):
+        vals = self.data_object[self.field].copy()
+        vals.sort()
+        self.result = hashlib.sha256(vals.tostring()).hexdigest()
+
+    def compare(self, old_result):
+        if self.result != old_result: raise FieldHashesDontMatch
+
+class CenteredSphere(YTFieldValuesTest):
+
+    def setup(self):
+        YTFieldValuesTest.setup(self)
+        center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+        width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
+        self.data_object = self.pf.h.sphere(center, width/0.25)
+
+for field in field_list + particle_field_list:
+    create_test(CenteredSphere, "centered_sphere_%s" % (field), field = field)
+
+class OffCenteredSphere(YTFieldValuesTest):
+
+    def setup(self):
+        YTFieldValuesTest.setup(self)
+        center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+        width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
+        self.data_object = self.pf.h.sphere(center - 0.25 * width, width/0.25)
+
+for field in field_list + particle_field_list:
+    create_test(OffCenteredSphere, "off_centered_sphere_%s" % (field), field = field)
+
+class CornerSphere(YTFieldValuesTest):
+
+    def setup(self):
+        YTFieldValuesTest.setup(self)
+        width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
+        self.data_object = self.pf.h.sphere(self.pf.domain_left_edge, width/0.25)
+
+for field in field_list + particle_field_list:
+    create_test(CornerSphere, "corner_sphere_%s" % (field), field = field)
+
+class AllData(YTFieldValuesTest):
+    def setup(self):
+        YTFieldValuesTest.setup(self)
+        self.data_object = self.pf.h.all_data()
+
+for field in field_list + particle_field_list:
+    create_test(AllData, "all_data_%s" % (field), field = field)


--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/projections.py	Tue May 10 03:44:02 2011 -0400
@@ -0,0 +1,16 @@
+from yt.utilities.answer_testing.output_tests import \
+    SingleOutputTest, create_test
+from yt.utilities.answer_testing.hydro_tests import \
+    TestProjection, TestGasDistribution
+from fields_to_test import field_list
+
+for axis in range(3):
+    for field in field_list:
+        create_test(TestProjection, "projection_test_%s_%s" % (axis, field),
+                    field = field, axis = axis)
+
+for field in field_list:
+    create_test(TestGasDistribution, "profile_density_test_%s" % field,
+                field_x = "Density", field_y = field)
+
+


--- a/tests/runall.py	Mon May 09 22:42:45 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-"""
-Basic unit test suite to run all the tests at once. Hopefully this it's
-clear how to append additional tests.
-
-You can run this using: 
-
-$ python tests/runall.py
-
-This should be done from the root directory of the installation.
-
-YT can either be installed globally, or the extensions build with:
-
-$ python setup.py build_ext --inplace
-"""
-
-import unittest
-
-import test_lagos
-import test_raven
-import test_hdf5_reader
-
-def get_suite():
-    suite_l = unittest.defaultTestLoader.loadTestsFromModule(test_lagos)
-    suite_r = unittest.defaultTestLoader.loadTestsFromModule(test_raven)
-    suite_h = unittest.defaultTestLoader.loadTestsFromModule(test_hdf5_reader)
-    suite = unittest.TestSuite([suite_l, suite_r])
-    return suite
-
-if __name__ == '__main__':
-    unittest.main(defaultTest='get_suite')


--- a/tests/test_hdf5_reader.py	Mon May 09 22:42:45 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-import unittest, numpy, tables, sys, os, os.path
-sys.path.insert(0,".")
-
-from new import classobj
-
-from yt.lagos.HDF5LightReader import ReadData, ReadingError
-
-my_dtypes = ['short','int','longlong','ushort','uint','ulonglong',
-             'float','double']
-
-class HDF5LightTestIOBase(object):
-    def setUp(self):
-        self.rand_array = numpy.random.random(3000).reshape((30,10,10)).astype(self.dtype)
-    def test_check_io(self):
-        my_table = tables.openFile("testing_h5lt_io.h5","w")
-        my_table.createArray("/","%s" % (self.dtype),self.rand_array)
-        my_table.close()
-        recv_array = ReadData("testing_h5lt_io.h5", "/%s" % (self.dtype))
-        self.assert_(numpy.all(recv_array == self.rand_array))
-        self.assert_(recv_array.shape == self.rand_array.shape)
-        self.assertTrue(recv_array.flags.owndata)
-    def tearDown(self):
-        os.unlink("testing_h5lt_io.h5")
-
-for dtype in my_dtypes:
-    temp = classobj("TestingIO_%s" % (dtype),
-            (HDF5LightTestIOBase,unittest.TestCase), {'dtype':dtype})
-    exec('TestingIO_%s = temp' % dtype)
-
-class HDF5LightTestError(unittest.TestCase):
-    def test_no_file(self):
-        fn = "%s.h5" % int(numpy.random.random(1) * 1e6)
-        self.assertRaises(ReadingError, ReadData,fn,"/Nothing")
-    def test_no_dataset(self):
-        fn = "%s.h5" % int(numpy.random.random(1) * 1e6)
-        my_table = tables.openFile("testing_h5lt_io.h5","w")
-        my_table.close()
-        self.assertRaises(ReadingError, ReadData,fn,"/Nothing")
-    def tearDown(self):
-        if os.path.exists("testing_h5lt_io.h5"): os.unlink("testing_h5lt_io.h5")
-
-if __name__ == "__main__":
-    unittest.main()


--- a/tests/test_lagos.py	Mon May 09 22:42:45 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,698 +0,0 @@
-"""
-Test that we can get outputs, and interact with them in some primitive ways.
-"""
-
-# @TODO: Add unit test for deleting field from FieldInfo
-
-import unittest, glob, os.path, os, sys, StringIO
-
-print "Reporting from %s" % (os.getcwd())
-sys.path = ['.'] + sys.path
-
-from yt.config import ytcfg
-ytcfg["yt","LogLevel"] = '50'
-ytcfg["yt","logFile"] = "False"
-ytcfg["yt","suppressStreamLogging"] = "True"
-ytcfg["lagos","serialize"] = "False"
-
-import cPickle
-import numpy as na
-#from yt.utilities.exceptions import *
-from yt.data_objects.field_info_container import \
-    ValidationException
-from yt.mods import *
-from yt.analysis_modules.level_sets.api import *
-from yt.utilities.linear_interpolators import \
-    UnilinearFieldInterpolator, \
-    BilinearFieldInterpolator, \
-    TrilinearFieldInterpolator
-
-# The dataset used is located at:
-# http://yt.spacepope.org/DD0018.zip
-fn = "DD0010/moving7_0010"
-fn = os.path.join(os.path.dirname(__file__), fn)
-
-class LagosTestingBase:
-    def setUp(self):
-        self.OutputFile = load(fn)
-        self.hierarchy = self.OutputFile.hierarchy
-        self.v, self.c = self.hierarchy.find_max("Density")
-        gp = os.path.join(os.path.dirname(fn),"*.yt")
-        ytFiles = glob.glob(gp)
-        for i in ytFiles:
-            #print "Removing %s" % (i)
-            os.unlink(i)
-
-    def tearDown(self):
-        if hasattr(self,'data'): del self.data
-        if hasattr(self,'region'): del self.region
-        if hasattr(self,'ind_to_get'): del self.ind_to_get
-        del self.OutputFile, self.hierarchy
-        
-class TestParameterFileStore(unittest.TestCase):
-    def setUp(self):
-        self.original = (yt.config.ytcfg.get("yt","ParameterFileStore"),
-                         yt.config.ytcfg.get("lagos","serialize"))
-        ytcfg['yt','ParameterFileStore'] = "testing.csv"
-        pfs = ParameterFileStore()
-        os.unlink(pfs._get_db_name())
-        self.pfs = ParameterFileStore() # __init__ gets called again
-        ytcfg['lagos', 'serialize'] = "True"
-
-    def testCacheFile(self):
-        pf1 = load(fn)
-        pf2 = self.pfs.get_pf_hash(pf1._hash())
-        self.assertTrue(pf1 is pf2)
-
-    def testGrabFile(self):
-        pf1 = load(fn)
-        hash = pf1._hash()
-        del pf1
-        pf2 = self.pfs.get_pf_hash(hash)
-        self.assertTrue(hash == pf2._hash())
-
-    def testGetCurrentTimeID(self):
-        pf1 = load(fn)
-        hash = pf1._hash()
-        ctid = pf1["CurrentTimeIdentifier"]
-        del pf1
-        pf2 = self.pfs.get_pf_ctid(ctid)
-        self.assertTrue(hash == pf2._hash())
-
-    def tearDown(self):
-        os.unlink(self.pfs._get_db_name())
-        ytcfg['yt', 'ParameterFileStore'] = self.original[0]
-        ytcfg['lagos', 'serialize'] = self.original[1]
-        self.pfs.__init__()
-
-class TestHierarchy(LagosTestingBase, unittest.TestCase):
-    def testGetHierarchy(self):
-        self.assert_(self.OutputFile.hierarchy != None)
-
-    def testGetUnits(self):
-        self.assert_(self.OutputFile["cm"] != 1.0)
-
-    def testGetSmallestDx(self):
-        self.assertAlmostEqual(self.hierarchy.get_smallest_dx(),
-                               0.00048828125, 7)
-
-    def testGetNumberOfGrids(self):
-        self.assertEqual(self.hierarchy.num_grids, len(self.hierarchy.grids))
-        self.assertEqual(self.hierarchy.num_grids, 10)
-
-    def testChildrenOfRootGrid(self):
-        for child in self.hierarchy.grids[0].Children:
-            self.assert_(child.Parent.id == self.hierarchy.grids[0].id)
-
-    def testGetSelectLevels(self):
-        for level in range(self.hierarchy.max_level+1):
-            for grid in self.hierarchy.select_grids(level):
-                self.assert_(grid.Level == level)
-
-    def testPrintStats(self):
-        a = sys.stdout
-        sys.stdout = StringIO.StringIO()
-        try:
-            self.hierarchy.print_stats()
-            worked = True
-        except:
-            worked = False
-        sys.stdout = a
-        self.assert_(worked)
-
-    def testDataTypes(self):
-        r=self.hierarchy.region(
-                     [0.5,0.5,0.5],[0.0, 0.0, 0.0],
-                     [1.0, 1.0, 1.0],
-                     ["CellMass","Temperature"])
-            # Testing multiple fields fed in
-        s=self.hierarchy.sphere(
-                     [0.5,0.5,0.5],2.0,
-                     ["CellMass","Temperature"])
-        ms = s["CellMass"].sum() # Testing adding new field transparently
-        mr = r["CellMass"].sum() # Testing adding new field transparently
-        self.assertEqual(ms,mr)  # Asserting equality between the two
-
-    def testProjectionCorrectnessMultipleFields(self):
-        p = self.hierarchy.proj(0,["Density","Ones"], weight=None) # Unweighted
-        self.assertTrue(na.all(p["Ones"] == 1.0))
-
-    def testProjectionMakingMultipleFields(self):
-        p = self.hierarchy.proj(0,["Density","Temperature","Ones"],weight_field="Ones") # Unweighted
-        # One for each field, pdx, pdy, px, py, and one for the weight
-        self.assertEqual(len(p.data.keys()), 8)
-
-    def testProjectionSuccess(self):
-        p = self.hierarchy.proj(0,"Density") # Unweighted
-        p = self.hierarchy.proj(1,"Temperature","Density") # Weighted
-        p = self.hierarchy.proj(2,"Entropy") # Derived field
-
-    def testUnweightedProjectionCorrectness(self):
-        # Now we test that we get good answers
-        for axis in range(3):
-            p = self.hierarchy.proj(axis, "Ones") # Derived field
-            self.assertTrue(na.all(p["Ones"] == 1.0))
-            # Regardless of weighting, we want ones back
-
-    def testWeightedProjectionCorrectness(self):
-        # Now we test that we get good answers
-        for axis in range(3):
-            # Regardless of weighting, we want ones back
-            p = self.hierarchy.proj(axis, "Ones", "Density")
-            self.assertTrue(na.all(p["Ones"] == 1.0))
-
-# Now we test each datatype in turn
-
-def _returnFieldFunction(field):
-    def field_function(self):
-        try:
-            self.data[field.name]
-            if not field.particle_type and not field.vector_field and \
-                self.data[field.name].size > 1:
-                self.assertEqual(na.product(self.data["Density"].shape),
-                                 na.product(self.data[field.name].shape))
-            del self.data[field.name]
-        except ValidationException:
-            pass
-    return field_function
-
-def _returnProfile1DFunction(field, weight, accumulation, lazy):
-    def add_field_function(self):
-        self.data.set_field_parameter("center",[.5,.5,.5])
-        profile = BinnedProfile1D(
-            self.data, 8, "RadiusCode", 0, 1.0, False, lazy)
-        profile.add_fields(field, weight=weight, accumulation=accumulation)
-    return add_field_function
-
-def _returnProfile2DFunction(field, weight, accumulation, lazy):
-    def add_field_function(self):
-        self.data.set_field_parameter("center",[.5,.5,.5])
-        cv_min = self.hierarchy.get_smallest_dx()**3.0
-        cv_max = 1.0 / max(self.OutputFile["TopGridDimensions"])
-        profile = BinnedProfile2D(self.data,
-                    8, "RadiusCode", 1e-3, 1.0, True,
-                    8, "CellVolumeCode", cv_min, cv_max, True, lazy)
-        profile.add_fields(field, weight=weight, accumulation=accumulation)
-    return add_field_function
-
-class DataTypeTestingBase:
-    def setUp(self):
-        LagosTestingBase.setUp(self)
-
-    def testRepr(self):
-        self.assertTrue(
-            ("%s" % self.data).startswith(self.data.__class__.__name__))
-
-class Data3DBase:
-    def testProfileAccumulateMass(self):
-        self.data.set_field_parameter("center",[0.5]*3)
-        profile = BinnedProfile1D(self.data, 8, "RadiusCode", 0, 1.0,
-                                           False, True)
-        profile.add_fields("CellMassMsun", weight=None, accumulation=True)
-        v1 = profile["CellMassMsun"].max()
-        v2 = self.data["CellMassMsun"].sum()
-        v2 = na.abs(1.0 - v2/v1)
-        self.assertAlmostEqual(v2, 0.0, 7)
-
-    def testExtractConnectedSetsNoCache(self):
-        mi = self.data["Density"].min() * 2.0
-        ma = self.data["Density"].max() * 0.99
-        cons, contours = self.data.extract_connected_sets(
-            "Density", 2, mi, ma)
-        self.assertEqual(len(contours), 2) # number of contour levels
-        self.assertEqual(len(contours[0]), 2)
-        self.assertEqual(len(contours[1]), 1)
-
-    def testExtractConnectedSetsCache(self):
-        mi = self.data["Density"].min() * 2.0
-        ma = self.data["Density"].max() * 0.99
-        cons, contours = self.data.extract_connected_sets(
-            "Density", 2, mi, ma, cache=True)
-        self.assertEqual(len(contours), 2) # number of contour levels
-        self.assertEqual(len(contours[0]), 2)
-        self.assertEqual(len(contours[1]), 1)
-
-    def testContoursCache(self):
-        cid = identify_contours(self.data, "Density",
-                self.data["Density"].min()*2.00,
-                self.data["Density"].max()*1.01)
-        self.assertEqual(len(cid), 2)
-
-    def testContoursObtain(self):
-        cid = identify_contours(self.data, "Density",
-                self.data["Density"].min()*2.00, self.data["Density"].max()*1.01)
-        self.assertEqual(len(cid), 2)
-
-    def testContoursValidityMax(self):
-        v1 = self.data["Density"].max()*0.99
-        v2 = self.data["Density"].max()*1.01
-        cid = identify_contours(self.data, "Density", v1, v2)
-        self.assertTrue(na.all(v1 < self.data["Density"][cid[0]])
-                    and na.all(v2 > self.data["Density"][cid[0]]))
-        self.assertEqual(len(cid), 1)
-
-    def testContoursValidityMin(self):
-        v1 = self.data["Density"].min()*0.99
-        v2 = self.data["Density"].min()*1.01
-        cid = identify_contours(self.data, "Density", v1, v2)
-        self.assertTrue(na.all(v1 < self.data["Density"][cid[0]])
-                    and na.all(v2 > self.data["Density"][cid[0]]))
-        self.assertEqual(len(cid), 3)
-
-    def testPickle(self):
-        ps = cPickle.dumps(self.data)
-        pf, obj = cPickle.loads(ps)
-        self.assertEqual(obj["CellMassMsun"].sum(), self.data["CellMassMsun"].sum())
-
-for field_name in FieldInfo:
-    if field_name.startswith("PT"): continue
-    field = FieldInfo[field_name]
-    setattr(DataTypeTestingBase, "test%s" % field.name, _returnFieldFunction(field))
-
-field = "Temperature"
-for weight in [None, "CellMassMsun"]:
-    for lazy in [True, False]:
-        for accumulation in [True, False]:
-            func = _returnProfile1DFunction(field, weight, accumulation, lazy)
-            name = "test%sProfile1D_w%s_l%s_a%s" % (field,
-                                                weight, lazy,
-                                                accumulation)
-            setattr(Data3DBase, name, func)
-
-for weight in [None, "CellMassMsun"]:
-    for lazy in [True, False]:
-        for accumulation_x in [True, False]:
-            for accumulation_y in [True, False]:
-                acc = (accumulation_x, accumulation_y)
-                func = _returnProfile2DFunction(field, weight, acc, lazy)
-                name = "test%sProfile2D_w%s_l%s_a%s_a%s" % (field,
-                                                        weight, lazy,
-                                                        accumulation_x,
-                                                        accumulation_y)
-                setattr(Data3DBase, name, func)
-
-class TestSmoothedCoveringGrid(LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        LagosTestingBase.setUp(self)
-
-    def testAllCover(self):
-        DIMS = 32
-        for i in range(self.hierarchy.max_level+1):
-            dx = (DIMS*2**i)**-1
-            LE = na.array([0.5,0.5,0.5])-(dx*DIMS/2.0)
-            RE = na.array([0.5,0.5,0.5])+(dx*DIMS/2.0)
-            cg = self.hierarchy.smoothed_covering_grid(
-                    level=i, left_edge=LE, right_edge=RE,
-                    dims=[DIMS]*3, fields=["Density"])
-            self.assertFalse(na.any(na.isnan(cg["Density"])))
-            self.assertFalse(na.any(cg["Density"]==-999))
-
-    def testAddField(self):
-        DIMS = 64
-        i = 5
-        dx = (DIMS*2**i)**-1
-        LE = na.array([0.5,0.5,0.5])-(dx*DIMS/2.0)
-        RE = na.array([0.5,0.5,0.5])+(dx*DIMS/2.0)
-        cg = self.hierarchy.smoothed_covering_grid(
-                level=i, left_edge=LE, right_edge=RE,
-                dims=[DIMS]*3, fields=["Density"])
-        self.assertFalse(na.any(na.isnan(cg["Temperature"])))
-        self.assertFalse(na.any(cg["Temperature"]==-999))
-
-class TestIntSmoothedCoveringGrid(LagosTestingBase, unittest.TestCase):
-    def setUp(self):        
-        LagosTestingBase.setUp(self)
-
-    def testCoordinates(self):
-        # We skip the first grid because it has ghost zones on all sides
-        for g in self.hierarchy.grids[1:]:
-            LE = g.LeftEdge - g.dds
-            level = g.Level
-            dims = g.ActiveDimensions + 2
-            g1 = self.hierarchy.si_covering_grid(level, LE, dims)
-            g2 = g.retrieve_ghost_zones(1, ["x","y","z"], smoothed=False)
-            for field in 'xyz':
-                diff = na.abs((g1[field] - g2[field])/(g1[field] + g2[field]))
-                self.assertAlmostEqual(diff.max(), 0.0, 1e-14)
-
-class TestDataCube(LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        LagosTestingBase.setUp(self)
-
-    def testNoGhost(self):
-        DW = self.OutputFile["DomainRightEdge"] \
-           - self.OutputFile["DomainLeftEdge"]
-        for g in self.hierarchy.grids:
-            cube = g.retrieve_ghost_zones(0, "Density")
-            self.assertTrue(na.all(cube["Density"] == g["Density"]))
-            cube["Density"] = na.arange(cube["Density"].size).reshape(cube["Density"].shape)
-            cube.flush_data(field="Density")
-            self.assertTrue(na.all(g["Density"] == cube["Density"]))
-
-    def testOffsetDomain(self):
-        DW = self.OutputFile["DomainRightEdge"] \
-           - self.OutputFile["DomainLeftEdge"]
-        for g in self.hierarchy.grids:
-            cube = self.hierarchy.covering_grid(g.Level,
-                g.LeftEdge+DW, g.ActiveDimensions)
-            self.assertTrue(na.all(g["Density"] == cube["Density"]))
-
-    def testTwoGhost(self):
-        for g in self.hierarchy.grids:
-            cube = g.retrieve_ghost_zones(2, "Density")
-
-    def testMultipleFields(self):
-        for g in self.hierarchy.grids:
-            cube1 = g.retrieve_ghost_zones(0, ["Density","Temperature"])
-            self.assertTrue(na.all(cube1["Density"] == g["Density"]))
-            self.assertTrue(na.all(cube1["Temperature"] == g["Temperature"]))
-            cube2a = g.retrieve_ghost_zones(0, "Density")
-            cube2b = g.retrieve_ghost_zones(0, "Temperature")
-            self.assertTrue(na.all(cube1["Density"] == cube2a["Density"]))
-            self.assertTrue(na.all(cube1["Temperature"] == cube2b["Temperature"]))
-    
-    def testFlushBackToGrids(self):
-        ml = self.hierarchy.max_level
-        cg = self.hierarchy.covering_grid(2, [0.0]*3, [64,64,64])
-        cg["Ones"] *= 2.0
-        cg.flush_data(field="Ones")
-        for g in na.concatenate([self.hierarchy.select_grids(i) for i in range(3)]):
-            self.assertEqual(g["Ones"].max(), 2.0)
-            self.assertEqual(g["Ones"][g["Ones"]*g.child_mask>0].min(), 2.0)
-
-    def testFlushBackToNewCover(self):
-        ml = self.hierarchy.max_level
-        cg = self.hierarchy.covering_grid(2, [0.0]*3, [64,64,64])
-        cg["tempContours"] = cg["Ones"] * 2.0
-        cg.flush_data(field="tempContours")
-        cg2 = self.hierarchy.covering_grid(2, [0.0]*3, [64,64,64])
-        self.assertTrue(na.all(cg["tempContours"] == cg2["tempContours"]))
-
-    def testRawFlushBack(self):
-        ml = self.hierarchy.max_level
-        cg = self.hierarchy.covering_grid(2, [0.0]*3, [64,64,64])
-        cg["DensityNew"] = cg["Density"] * 2.111
-        cg.flush_data(field="DensityNew")
-        for g in na.concatenate([self.hierarchy.select_grids(i) for i in range(3)]):
-            ni = g["DensityNew"] > 0
-            min_diff = (g["DensityNew"][ni]/g["Density"][ni]).max()
-            max_diff = (g["DensityNew"][ni]/g["Density"][ni]).min()
-            min_diff_i = na.argmin(g["DensityNew"][ni]/g["Density"][ni])
-            max_diff_i = na.argmax(g["DensityNew"][ni]/g["Density"][ni])
-            self.assertAlmostEqual(min_diff, 2.111, 5)
-            self.assertAlmostEqual(max_diff, 2.111, 5)
-
-    def testAllCover(self):
-        cg = self.hierarchy.covering_grid(1, [0.0]*3, [32,32,32])
-        mi, ma = 1e30, -1e30
-        for g in na.concatenate([self.hierarchy.select_grids(i) for i in range(2)]):
-            ma = max(ma, g["Density"].max())
-            mi = min(mi, g["Density"].min())
-        self.assertEqual(cg["Density"].max(), ma)
-        self.assertEqual(cg["Density"].min(), mi)
-
-    def testCellVolume(self):
-        cg = self.hierarchy.covering_grid(2, [0.0]*3, [64,64,64])
-        self.assertEqual(na.unique(cg["CellVolume"]).size, 1)
-
-
-class TestDiskDataType(Data3DBase, DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data=self.hierarchy.disk(
-                     [0.5,0.5,0.5],[0.2, 0.1, 0.5],1.0,1.0)
-
-class TestRegionDataType(Data3DBase, DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data=self.hierarchy.region(
-                     [0.5,0.5,0.5],[0.0, 0.0, 0.0],
-                     [1.0, 1.0, 1.0])
-    def testVolume(self):
-        vol = self.data["CellVolume"].sum() / self.data.convert("cm")**3.0
-        self.assertAlmostEqual(vol,1.0,7)
-
-class TestRegionStrictDataType(Data3DBase, DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data=self.hierarchy.region_strict(
-                     [0.5,0.5,0.5],[0.0, 0.0, 0.0],
-                     [1.0, 1.0, 1.0])
-    def testVolume(self):
-        vol = self.data["CellVolume"].sum() / self.data.convert("cm")**3.0
-        self.assertAlmostEqual(vol,1.0,7)
-
-class TestPeriodicRegionDataType(Data3DBase, DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data=self.hierarchy.periodic_region(
-                     [0.5,0.5,0.5],[0.5, 0.5, 0.5],
-                     [1.5,1.5,1.5])
-    def testVolume(self):
-        vol = self.data["CellVolume"].sum() / self.data.convert("cm")**3.0
-        self.assertAlmostEqual(vol,1.0,7)
-
-class TestPeriodicRegionStrictDataType(Data3DBase,
-            DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data=self.hierarchy.periodic_region_strict(
-                     [0.5,0.5,0.5],[0.5, 0.5, 0.5],
-                     [1.5,1.5,1.5])
-    def testVolume(self):
-        vol = self.data["CellVolume"].sum() / self.data.convert("cm")**3.0
-        self.assertAlmostEqual(vol,1.0,7)
-
-class TestSphereDataType(Data3DBase, DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data=self.hierarchy.sphere([0.5,0.5,0.5],1.0)
-    def testVolume(self):
-        vol = self.data["CellVolume"].sum() / self.data.convert("cm")**3.0
-        self.assertAlmostEqual(vol,1.0,7)
-
-class TestSliceDataType(DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data = self.hierarchy.slice(0,0.5, center=[0.5, 0.5, 0.5])
-
-class TestCuttingPlane(DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data = self.hierarchy.cutting([0.1,0.3,0.4], [0.5,0.5,0.5], ["Density"])
-    def testAxisVectors(self):
-        x_v = self.data._x_vec
-        y_v = self.data._y_vec
-        z_v = self.data._norm_vec
-        self.assertAlmostEqual(na.dot(x_v, y_v), 0.0, 7)
-        self.assertAlmostEqual(na.dot(x_v, z_v), 0.0, 7)
-        self.assertAlmostEqual(na.dot(y_v, z_v), 0.0, 7)
-    def testZHeight(self):
-        self.assertTrue(na.all(self.data['pz'] < self.data['dx']))
-
-class TestGridDataType(DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data = self.hierarchy.grids[0]
-
-class TestExtractFromSphere(TestSphereDataType):
-    def setUp(self):
-        TestSphereDataType.setUp(self)
-        self.region = self.data
-        self.ind_to_get = na.where(self.region["Temperature"]>500)
-        self.data = self.region.extract_region(self.ind_to_get)
-
-    def testNumberOfEntries(self):
-        self.assertEqual(self.ind_to_get[0].shape,
-                        self.data["Density"].shape)
-    def testVolume(self):
-        self.ind_to_get = na.where(self.region["CellVolume"]>0.0)
-        vol = self.region.extract_region(self.ind_to_get)["CellVolume"].sum() \
-            / self.data.convert("cm")**3.0
-        self.assertAlmostEqual(vol,1.0,7)
-
-    def testJoin(self):
-        new_region = self.region.extract_region(
-                self.region["Temperature"]<=500)
-        joined_region = self.data.join(new_region)
-        self.assertEqual(joined_region["CellMassMsun"].sum(),
-                         self.region["CellMassMsun"].sum())
-
-    # I have verified that the *old* version of the code overconnected
-    # contours outside.  These are overridden to make sure this does not
-    # happen again!
-    def testContoursObtain(self):
-        cid = identify_contours(self.data, "Density",
-                self.data["Density"].min()*2.00, self.data["Density"].max()*1.01)
-        self.assertEqual(len(cid), 10)
-
-    def testContoursCache(self):
-        cid = identify_contours(self.data, "Density",
-                self.data["Density"].min()*2.00,
-                self.data["Density"].max()*1.01)
-        self.assertEqual(len(cid), 10)
-
-    def testExtractConnectedSetsNoCache(self):
-        mi = self.data["Density"].min() * 2.0
-        ma = self.data["Density"].max() * 0.99
-        cons, contours = self.data.extract_connected_sets(
-            "Density", 2, mi, ma)
-        self.assertEqual(len(contours), 2) # number of contour levels
-        self.assertEqual(len(contours[0]), 10)
-        self.assertEqual(len(contours[1]), 1)
-
-    def testExtractConnectedSetsCache(self):
-        mi = self.data["Density"].min() * 2.0
-        ma = self.data["Density"].max() * 0.99
-        cons, contours = self.data.extract_connected_sets(
-            "Density", 2, mi, ma, cache=True)
-        self.assertEqual(len(contours), 2) # number of contour levels
-        self.assertEqual(len(contours[0]), 10)
-        self.assertEqual(len(contours[1]), 1)
-
-class TestExtractFromRegion(TestRegionDataType):
-    def setUp(self):
-        TestRegionDataType.setUp(self)
-        self.region = self.data
-        self.ind_to_get = na.where(self.region["Temperature"]>500)
-        self.data = self.region.extract_region(self.ind_to_get)
-    def testNumberOfEntries(self):
-        self.assertEqual(self.ind_to_get[0].shape,
-                        self.data["Density"].shape)
-    def testVolume(self):
-        ind_to_get = na.where(self.region["CellVolume"]>0.0)
-        vol = self.region.extract_region(ind_to_get)["CellVolume"].sum() \
-            / self.data.convert("cm")**3.0
-        self.assertAlmostEqual(vol,1.0,7)
-
-    def testJoin(self):
-        new_region = self.region.extract_region(
-                self.region["Temperature"]<=500)
-        joined_region = self.data.join(new_region)
-        self.assertEqual(joined_region["CellMassMsun"].sum(),
-                         self.region["CellMassMsun"].sum())
-
-    # I have verified that the *old* version of the code overconnected
-    # contours outside.  These are overridden to make sure this does not
-    # happen again!
-    def testContoursObtain(self):
-        cid = identify_contours(self.data, "Density",
-                self.data["Density"].min()*2.00, self.data["Density"].max()*1.01)
-        self.assertEqual(len(cid), 10)
-
-    def testContoursCache(self):
-        cid = identify_contours(self.data, "Density",
-                self.data["Density"].min()*2.00,
-                self.data["Density"].max()*1.01)
-        self.assertEqual(len(cid), 10)
-
-    def testExtractConnectedSetsNoCache(self):
-        mi = self.data["Density"].min() * 2.0
-        ma = self.data["Density"].max() * 0.99
-        cons, contours = self.data.extract_connected_sets(
-            "Density", 2, mi, ma)
-        self.assertEqual(len(contours), 2) # number of contour levels
-        self.assertEqual(len(contours[0]), 10)
-        self.assertEqual(len(contours[1]), 1)
-
-    def testExtractConnectedSetsCache(self):
-        mi = self.data["Density"].min() * 2.0
-        ma = self.data["Density"].max() * 0.99
-        cons, contours = self.data.extract_connected_sets(
-            "Density", 2, mi, ma, cache=True)
-        self.assertEqual(len(contours), 2) # number of contour levels
-        self.assertEqual(len(contours[0]), 10)
-        self.assertEqual(len(contours[1]), 1)
-
-
-class TestUnilinearInterpolator(unittest.TestCase):
-    def setUp(self):
-        x0, x1 = na.random.uniform(-100,100,2)
-        nstep_x = na.random.randint(10,200)
-        nvals = na.random.randint(100,1000)
-
-        table = na.mgrid[x0:x1:nstep_x*1j]
-
-        self.ufi_x = UnilinearFieldInterpolator(table,
-                      (x0,x1),'x')
-        self.my_dict = {}
-        self.my_dict['x'] = na.random.uniform(x0,x1,nvals)
-
-    def testXInt(self):
-        nv = self.ufi_x(self.my_dict)
-        for i,v in enumerate(nv):
-            self.assertAlmostEqual(v, self.my_dict['x'][i], 5)
-
-class TestBilinearInterpolator(unittest.TestCase):
-    def setUp(self):
-        x0, x1 = na.random.uniform(-100,100,2)
-        y0, y1 = na.random.uniform(-100,100,2)
-        nstep_x = na.random.randint(10,200)
-        nstep_y = na.random.randint(10,200)
-        nvals = na.random.randint(100,1000)
-
-        table = na.mgrid[x0:x1:nstep_x*1j,
-                         y0:y1:nstep_y*1j]
-
-        self.bfi_x = BilinearFieldInterpolator(table[0,...],
-                      (x0,x1,y0,y1),['x','y'])
-        self.bfi_y = BilinearFieldInterpolator(table[1,...],
-                      (x0,x1,y0,y1),['x','y'])
-        self.my_dict = {}
-        self.my_dict['x'] = na.random.uniform(x0,x1,nvals)
-        self.my_dict['y'] = na.random.uniform(y0,y1,nvals)
-
-    def testXInt(self):
-        nv = self.bfi_x(self.my_dict)
-        for i,v in enumerate(nv):
-            self.assertAlmostEqual(v, self.my_dict['x'][i], 5)
-
-    def testYInt(self):
-        nv = self.bfi_y(self.my_dict)
-        for i,v in enumerate(nv):
-            self.assertAlmostEqual(v, self.my_dict['y'][i], 5)
-
-class TestTrilinearInterpolator(unittest.TestCase):
-    def setUp(self):
-        x0, x1 = na.random.uniform(-100,100,2)
-        y0, y1 = na.random.uniform(-100,100,2)
-        z0, z1 = na.random.uniform(-100,100,2)
-        nstep_x = na.random.randint(10,200)
-        nstep_y = na.random.randint(10,200)
-        nstep_z = na.random.randint(10,200)
-        nvals = na.random.randint(100,1000)
-
-        table = na.mgrid[x0:x1:nstep_x*1j,
-                         y0:y1:nstep_y*1j,
-                         z0:z1:nstep_z*1j]
-
-        self.tfi_x = TrilinearFieldInterpolator(table[0,...],
-                      (x0,x1,y0,y1,z0,z1),['x','y','z'])
-        self.tfi_y = TrilinearFieldInterpolator(table[1,...],
-                      (x0,x1,y0,y1,z0,z1),['x','y','z'])
-        self.tfi_z = TrilinearFieldInterpolator(table[2,...],
-                      (x0,x1,y0,y1,z0,z1),['x','y','z'])
-        self.my_dict = {}
-        self.my_dict['x'] = na.random.uniform(x0,x1,nvals)
-        self.my_dict['y'] = na.random.uniform(y0,y1,nvals)
-        self.my_dict['z'] = na.random.uniform(z0,z1,nvals)
-
-    def testXInt(self):
-        nv = self.tfi_x(self.my_dict)
-        for i,v in enumerate(nv):
-            self.assertAlmostEqual(v, self.my_dict['x'][i], 5)
-
-    def testYInt(self):
-        nv = self.tfi_y(self.my_dict)
-        for i,v in enumerate(nv):
-            self.assertAlmostEqual(v, self.my_dict['y'][i], 5)
-
-    def testZInt(self):
-        nv = self.tfi_z(self.my_dict)
-        for i,v in enumerate(nv):
-            self.assertAlmostEqual(v, self.my_dict['z'][i], 5)
-
-
-
-if __name__ == "__main__":
-    unittest.main()


--- a/tests/test_raven.py	Mon May 09 22:42:45 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,175 +0,0 @@
-"""
-Test that we can make plots
-"""
-
-import unittest, glob, os.path, os, sys, StringIO
-
-print "Reporting from %s" % (os.getcwd())
-sys.path = ['.'] + sys.path
-
-from yt.config import ytcfg
-ytcfg["yt","LogLevel"] = '50'
-ytcfg["yt","logFile"] = "False"
-ytcfg["yt","suppressStreamLogging"] = "True"
-ytcfg["lagos","serialize"] = "False"
-
-import numpy as na
-import yt.lagos
-import yt.raven
-
-# The dataset used is located at:
-# http://yt.spacepope.org/DD0018.zip
-fn = "DD0010/moving7_0010"
-fn = os.path.join(os.path.dirname(__file__),fn)
-
-class RavenTestingBase:
-    def setUp(self):
-        self.OutputFile = yt.lagos.EnzoStaticOutput(fn)
-        self.hierarchy = self.OutputFile.hierarchy
-        self.pc = yt.raven.PlotCollection(self.OutputFile)
-        self.v, self.c = self.hierarchy.find_max("Density")
-        gp = os.path.join(os.path.dirname(fn),"*.yt")
-        ytFiles = glob.glob(gp)
-        for i in ytFiles:
-            os.unlink(i)
-        self.localSetup()
-
-    def tearDown(self):
-        if hasattr(self,'data'): del self.data
-        if hasattr(self,'region'): del self.region
-        if hasattr(self,'ind_to_get'): del self.ind_to_get
-        if hasattr(self,'pc'): del self.pc
-        del self.OutputFile, self.hierarchy
-        
-    def DoSave(self):
-        fns=self.pc.save("test")
-        for fn in fns:
-            os.unlink(fn)
-
-    def _testSlice(self):
-        self.pc.add_slice("Density",0)
-        self.pc.plots[-1].switch_z("CellMass")
-        self.pc.set_width(0.5,'1')
-        self.pc.set_zlim(1,1000)
-        self.pc.set_cmap("hot")
-        self.DoSave()
-
-    def _testProjection(self):
-        self.pc.add_projection("Temperature",1,weight_field="Density")
-        self.pc.set_width(0.5,'1')
-        self.pc.set_zlim(1,1000)
-        self.pc.set_cmap("hot")
-        self.DoSave()
-
-    def _testThreePhaseSphere(self):
-        print "Testing ThreePhase"
-        self.pc.add_phase_sphere(1.0,'1',["Density","Temperature","Density"],center=self.c)
-        self.DoSave()
-
-    def _testCallbacksOnSlices(self):
-        # We test a couple things here
-        # Add callbacks, then remove one, then do the plot-saving
-        # Both types of callback should be called here
-        for ax in range(3):
-            self.pc.add_slice("Density", 0)
-            x,y = yt.raven.axis_labels[ax]
-            v1 = "%s-velocity" % (x)
-            v2 = "%s-velocity" % (y)
-            qi = self.pc.plots[-1].add_callback(yt.raven.QuiverCallback(v1,v2,ax,32))
-            ti = self.pc.plots[-1].add_callback(yt.raven.ContourCallback("Temperature",
-                                               ncont=3, factor=10))
-            gi = self.pc.plots[-1].add_callback(yt.raven.ContourCallback("Gas_Energy",
-                                               ncont=3, factor=10))
-            self.pc.plots[-1].remove_callback(gi)
-        self.DoSave()
-
-class PlotTestingBase(RavenTestingBase):
-    def test_set_xlim(self):
-        self.pc.set_xlim(0.25,0.75)
-        self.DoSave()
-
-    def test_set_ylim(self):
-        self.pc.set_ylim(0.25,0.75)
-        self.DoSave()
-
-    def test_autoscale(self):
-        # verify autoscale changed
-        self.pc.autoscale()
-        self.DoSave()
-
-    def test_set_zlim(self):
-        self.pc.set_zlim(0.5, 1.0)
-        self.DoSave()
-
-    def test_set_lim(self):
-        self.pc.set_lim((0.25,0.75,0.25,0.75))
-        self.DoSave()
-
-    def test_set_width(self):
-        self.pc.set_width(0.25,'1')
-        self.DoSave()
-
-    def test_set_cmap(self):
-        self.pc.set_cmap("kamae")
-        self.DoSave()
-        self.pc.set_cmap("jet")
-        self.DoSave()
-
-    def test_switch_field(self):
-        for field in ["Temperature","x-velocity"]:
-            self.pc.switch_field(field)
-            # Check if the logging is set correctly
-            self.DoSave()
-
-    def test_clear_plots(self):
-        self.pc.clear_plots()
-        self.assertTrue(len(self.pc.plots) == 0)
-
-    def test_set_label(self):
-        for p in self.pc.plots: p.set_label(r"$\rm{Hi}$")
-        self.DoSave()
-        for p in self.pc.plots: p.set_label("Hi!")
-        self.DoSave()
-
-    def test_set_logfield(self):
-        for p in self.pc.plots: p.set_log_field(False)
-        self.DoSave()
-        for p in self.pc.plots: p.set_log_field(False)
-        self.DoSave()
-
-    def test_save(self):
-        self.DoSave()
-
-class TestSlices(PlotTestingBase, unittest.TestCase):
-    def localSetup(self):
-        self.pc.add_slice("Density",0)
-        self.pc.add_slice("Density",1)
-        self.pc.add_slice("Density",2)
-
-class TestSphere(PlotTestingBase, unittest.TestCase):
-    def localSetup(self):
-        self.pc.add_phase_sphere(1.0,'1',
-                ["Density","TotalEnergy","y-velocity"])
-
-class TestPhaseObject(PlotTestingBase, unittest.TestCase):
-    def localSetup(self):
-        obj = self.hierarchy.region([0.5]*3, [0.0]*3, [1.0]*3)
-        self.pc.add_phase_object(obj, ["Density","TotalEnergy","y-velocity"])
-
-class TestProjection(PlotTestingBase, unittest.TestCase):
-    def localSetup(self):
-        self.pc.add_projection("Density", 0)
-        self.pc.add_projection("Temperature", 1)
-        self.pc.add_projection("x-velocity", 2, weight_field="Density")
-
-class TestMixProjectionSlice(PlotTestingBase, unittest.TestCase):
-    def localSetup(self):
-        self.pc.add_projection("Density",0)
-        self.pc.add_slice("Density",0)
-
-class TestCuttingPlane(PlotTestingBase, unittest.TestCase):
-    def localSetup(self):
-        self.pc.add_cutting_plane("Density", [0.1,0.2,0.3])
-
-if __name__ == "__main__":
-    unittest.main()


http://bitbucket.org/yt_analysis/yt/changeset/11aeff1fd2d7/
changeset:   r4267:11aeff1fd2d7
branch:      yt
user:        MatthewTurk
date:        2011-05-10 10:40:12
summary:     Adding an answer test runner script to the testing directory.  Still some kinks
to work out, but this should be nearly enough to get answer testing going with
some momentum.

Note that we want to allow it to be run on multiple different parameter files
as well as multiple hashes, so it has a relatively verbose naming scheme.

Comparisons haven't really been tested.
affected #:  6 files (4.3 KB)

--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/runall.py	Tue May 10 04:40:12 2011 -0400
@@ -0,0 +1,95 @@
+from yt.config import ytcfg
+ytcfg["yt","serialize"] = "False"
+
+from yt.utilities.answer_testing.api import \
+    RegressionTestRunner, clear_registry, create_test, \
+    TestFieldStatistics, TestAllProjections, registry_entries
+
+from yt.utilities.command_line import get_yt_version
+
+from yt.mods import *
+import fnmatch
+import imp
+import optparse
+import itertools
+
+#
+# We assume all tests are to be run, unless explicitly given the name of a
+# single test or something that can be run through fnmatch.
+#
+# Keep in mind that we use a different nomenclature here than is used in the
+# Enzo testing system.  Our 'tests' are actually tests that are small and that
+# run relatively quickly on a single dataset; in Enzo's system, a 'test'
+# encompasses both the creation and the examination of data.  Here we assume
+# the data is kept constant.
+#
+
+cwd = os.path.dirname(globals().get("__file__", os.getcwd()))
+
+def load_tests(iname, idir):
+    f, filename, desc = imp.find_module(iname, [idir])
+    tmod = imp.load_module(iname, f, filename, desc)
+    return tmod
+
+def find_and_initialize_tests():
+    mapping = {}
+    for f in glob.glob(os.path.join(cwd,"*.py")):
+        clear_registry()
+        iname = os.path.basename(f[:-3])
+        try:
+            load_tests(iname, cwd)
+            mapping[iname] = registry_entries()
+            #print "Associating %s with" % (iname)
+            #print "\n    ".join(registry_entries())
+        except ImportError:
+            pass
+    return mapping
+
+if __name__ == "__main__":
+    mapping = find_and_initialize_tests()
+    test_storage_directory = ytcfg.get("yt","test_storage_dir")
+    my_hash = get_yt_version()
+    parser = optparse.OptionParser()
+    parser.add_option("-f", "--parameter-file", dest="parameter_file",
+                      default = os.path.join(cwd, "DD0010/moving7_0010"),
+                      help = "The parameter file value to feed to 'load' to test against",
+                      )
+    parser.add_option("-l", "--list", dest="list_tests", action="store_true",
+                      default = False, help = "List all tests and then exit")
+    parser.add_option("-t", "--tests", dest="test_pattern", default="*",
+                      help = "The test name pattern to match.  Can include wildcards.")
+    parser.add_option("-o", "--output", dest="storage_dir",
+                      default=test_storage_directory,
+                      help = "Base directory for storing test output.")
+    parser.add_option("-c", "--compare", dest="compare_name",
+                      default=None,
+                      help = "The name against which we will compare")
+    parser.add_option("-n", "--name", dest="this_name",
+                      default=my_hash,
+                      help = "The name we'll call this set of tests")
+    opts, args = parser.parse_args()
+    if opts.list_tests:
+        print "\n    ".join(sorted(itertools.chain(*mapping.values())))
+        sys.exit(0)
+    pf = load(opts.parameter_file)
+    if pf is None:
+        print "Couldn't load the specified parameter file."
+        sys.exit(1)
+    # Now we modify our compare name and self name to include the pf.
+    compare_id = opts.compare_name
+    if compare_id is not None: compare_id += "_%s_%s" % (pf, pf._hash())
+    this_id = opts.this_name + "%s_%s" % (pf, pf._hash())
+    rtr = RegressionTestRunner(this_id, compare_id,
+            compare_results_path = opts.storage_dir,
+            io_log = [opts.parameter_file])
+    tests_to_run = []
+    for m, vals in mapping.items():
+        print vals, opts.test_pattern
+        new_tests = fnmatch.filter(vals, opts.test_pattern)
+        if len(new_tests) == 0: continue
+        tests_to_run += new_tests
+        load_tests(m, cwd)
+    for test_name in sorted(tests_to_run):
+        rtr.run_test(test_name)
+    for test_name, result in sorted(rtr.passed_tests.items()):
+        print "TEST %s: %s" % (test_name, result)


--- a/yt/config.py	Tue May 10 03:44:02 2011 -0400
+++ b/yt/config.py	Tue May 10 04:40:12 2011 -0400
@@ -47,6 +47,7 @@
     pluginfilename = 'my_plugins.py',
     parallel_traceback = 'False',
     pasteboard_repo = '',
+    test_storage_dir = '/does/not/exist',
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten


--- a/yt/utilities/answer_testing/api.py	Tue May 10 03:44:02 2011 -0400
+++ b/yt/utilities/answer_testing/api.py	Tue May 10 04:40:12 2011 -0400
@@ -32,7 +32,8 @@
     RegressionTestRunner, \
     RegressionTestStorage, \
     run_main, \
-    clear_registry
+    clear_registry, \
+    registry_entries
 
 from .output_tests import \
     YTStaticOutputTest, \


--- a/yt/utilities/answer_testing/output_tests.py	Tue May 10 03:44:02 2011 -0400
+++ b/yt/utilities/answer_testing/output_tests.py	Tue May 10 04:40:12 2011 -0400
@@ -158,8 +158,11 @@
         self.io_log = io_log
 
     def __iter__(self):
-        for line in open(self.io_log):
-            yield line[len(self.io_log_header):].split()[0].strip()
+        if isinstance(self.io_log, types.StringTypes):
+            for line in open(self.io_log):
+                yield line[len(self.io_log_header):].split()[0].strip()
+        elif isinstance(self.io_log, types.ListType):
+            for line in self.io_log: yield line
 
 def create_test(base, new_name, **attrs):
     """


--- a/yt/utilities/answer_testing/runner.py	Tue May 10 03:44:02 2011 -0400
+++ b/yt/utilities/answer_testing/runner.py	Tue May 10 04:40:12 2011 -0400
@@ -36,6 +36,9 @@
 def clear_registry():
     test_registry.clear()
 
+def registry_entries():
+    return test_registry.keys()
+
 class RegressionTestStorage(object):
     def __init__(self, results_id, path = "."):
         self.id = results_id


--- a/yt/utilities/command_line.py	Tue May 10 03:44:02 2011 -0400
+++ b/yt/utilities/command_line.py	Tue May 10 04:40:12 2011 -0400
@@ -270,6 +270,13 @@
     commands.identify(u, repo)
     return u.popbuffer()
 
+def get_yt_version():
+    import pkg_resources
+    yt_provider = pkg_resources.get_provider("yt")
+    path = os.path.dirname(yt_provider.module_path)
+    version = _get_hg_version(path)[:12]
+    return version
+
 class YTCommands(cmdln.Cmdln):
     name="yt"
 


http://bitbucket.org/yt_analysis/yt/changeset/d9b9579e6ce3/
changeset:   r4268:d9b9579e6ce3
branch:      yt
user:        MatthewTurk
date:        2011-05-10 10:51:15
summary:     Minor change, to allow results to be stored somewhere centrally.
affected #:  1 file (45 bytes)

--- a/tests/runall.py	Tue May 10 04:40:12 2011 -0400
+++ b/tests/runall.py	Tue May 10 04:51:15 2011 -0400
@@ -80,6 +80,7 @@
     if compare_id is not None: compare_id += "_%s_%s" % (pf, pf._hash())
     this_id = opts.this_name + "%s_%s" % (pf, pf._hash())
     rtr = RegressionTestRunner(this_id, compare_id,
+            results_path = opts.storage_dir,
             compare_results_path = opts.storage_dir,
             io_log = [opts.parameter_file])
     tests_to_run = []


http://bitbucket.org/yt_analysis/yt/changeset/bc50e0ff898c/
changeset:   r4269:bc50e0ff898c
branch:      yt
user:        MatthewTurk
date:        2011-05-10 17:51:34
summary:     Raising the loglevel of the test runner, fixing a problem with array deltas for
int arrays, and adding another underscore to the results naming.
affected #:  2 files (54 bytes)

--- a/tests/runall.py	Tue May 10 04:51:15 2011 -0400
+++ b/tests/runall.py	Tue May 10 11:51:34 2011 -0400
@@ -1,4 +1,5 @@
 from yt.config import ytcfg
+ytcfg["yt","loglevel"] = "50"
 ytcfg["yt","serialize"] = "False"
 
 from yt.utilities.answer_testing.api import \
@@ -78,7 +79,7 @@
     # Now we modify our compare name and self name to include the pf.
     compare_id = opts.compare_name
     if compare_id is not None: compare_id += "_%s_%s" % (pf, pf._hash())
-    this_id = opts.this_name + "%s_%s" % (pf, pf._hash())
+    this_id = opts.this_name + "_%s_%s" % (pf, pf._hash())
     rtr = RegressionTestRunner(this_id, compare_id,
             results_path = opts.storage_dir,
             compare_results_path = opts.storage_dir,


--- a/yt/utilities/answer_testing/output_tests.py	Tue May 10 04:51:15 2011 -0400
+++ b/yt/utilities/answer_testing/output_tests.py	Tue May 10 11:51:34 2011 -0400
@@ -118,8 +118,8 @@
         """
         if a1.shape != a2.shape:
             raise ShapeMismatch(a1, a2)
-        delta = na.abs(a1 - a2)/(a1 + a2)
-        if delta.max() > acceptable:
+        delta = na.abs(a1 - a2).astype("float64")/(a1 + a2)
+        if na.nanmax(delta) > acceptable:
             raise ArrayDelta(delta, acceptable)
         return True
 


http://bitbucket.org/yt_analysis/yt/changeset/190cd8ee2a11/
changeset:   r4270:190cd8ee2a11
branch:      yt
user:        MatthewTurk
date:        2011-05-18 17:21:51
summary:     Merging in the testing changes.
affected #:  17 files (8.8 KB)

--- a/tests/answer_tests/fields_to_test.py	Wed May 18 09:37:36 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,9 +0,0 @@
-# We want to test several things.  We need to be able to run the 
-
-field_list = ["Density", "Temperature", "x-velocity", "y-velocity", "z-velocity",
-                  # Now some derived fields
-                  "Pressure", "SoundSpeed", "particle_density", "Entropy",
-                  # Ghost zones
-                  "AveragedDensity", "DivV"]
-
-particle_field_list = ["particle_position_x", "ParticleMassMsun"]


--- a/tests/answer_tests/hierarchy_consistency.py	Wed May 18 09:37:36 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTStaticOutputTest, RegressionTestException
-from yt.funcs import ensure_list
-
-class HierarchyInconsistent(RegressionTestException):
-    pass
-
-class HierarchyConsistency(YTStaticOutputTest):
-    name = "hierarchy_consistency"
-    def run(self):
-        self.result = \
-            all( g in ensure_list(c.Parent) for g in self.pf.h.grids
-                                            for c in g.Children )
-
-    def compare(self, old_result):
-        if not(old_result and self.result): raise HierarchyInconsistent()
-
-class GridLocationsProperties(YTStaticOutputTest):
-    name = "level_consistency"
-    def run(self):
-        self.result = dict(grid_left_edge = self.pf.h.grid_left_edge,
-                           grid_right_edge = self.pf.h.grid_right_edge,
-                           grid_levels = self.pf.h.grid_levels,
-                           grid_particle_count = self.pf.h.grid_particle_count,
-                           grid_dimensions = self.pf.h.grid_dimensions)
-
-    def compare(self, old_result):
-        # We allow no difference between these values
-        self.compare_data_arrays(self.result, old_result, 0.0)
-
-class GridRelationshipsChanged(RegressionTestException):
-    pass
-
-class GridRelationships(YTStaticOutputTest):
-
-    name = "grid_relationships"
-    def run(self):
-        self.result = [ [p.id for p in ensure_list(g.Parent) if g.Parent is not None]
-                        for g in self.pf.h.grids ]
-
-    def compare(self, old_result):
-        if len(old_result) != len(self.result):
-            raise GridRelationshipsChanged()
-        for plist1, plist2 in zip(old_result, self.result):
-            if len(plist1) != len(plist2): raise GridRelationshipsChanged()
-            if not all( (p1 == p2 for p1, p2 in zip(plist1, plist2) ) ):
-                raise GridRelationshipsChanged()
-
-class GridGlobalIndices(YTStaticOutputTest):
-    name = "global_startindex"
-
-    def run(self):
-        self.result = na.array([g.get_global_startindex()
-                                for g in self.pf.h.grids])
-
-    def compare(self, old_result):
-        self.compare_array_delta(old_result, self.result, 0.0)


--- a/tests/answer_tests/object_field_values.py	Wed May 18 09:37:36 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-import hashlib
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTStaticOutputTest, RegressionTestException, create_test
-from yt.funcs import ensure_list
-from fields_to_test import field_list, particle_field_list
-
-class FieldHashesDontMatch(RegressionTestException):
-    pass
-
-class YTFieldValuesTest(YTStaticOutputTest):
-    def run(self):
-        vals = self.data_object[self.field].copy()
-        vals.sort()
-        self.result = hashlib.sha256(vals.tostring()).hexdigest()
-
-    def compare(self, old_result):
-        if self.result != old_result: raise FieldHashesDontMatch
-
-class CenteredSphere(YTFieldValuesTest):
-
-    def setup(self):
-        YTFieldValuesTest.setup(self)
-        center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
-        width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-        self.data_object = self.pf.h.sphere(center, width/0.25)
-
-for field in field_list + particle_field_list:
-    create_test(CenteredSphere, "centered_sphere_%s" % (field), field = field)
-
-class OffCenteredSphere(YTFieldValuesTest):
-
-    def setup(self):
-        YTFieldValuesTest.setup(self)
-        center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
-        width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-        self.data_object = self.pf.h.sphere(center - 0.25 * width, width/0.25)
-
-for field in field_list + particle_field_list:
-    create_test(OffCenteredSphere, "off_centered_sphere_%s" % (field), field = field)
-
-class CornerSphere(YTFieldValuesTest):
-
-    def setup(self):
-        YTFieldValuesTest.setup(self)
-        width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-        self.data_object = self.pf.h.sphere(self.pf.domain_left_edge, width/0.25)
-
-for field in field_list + particle_field_list:
-    create_test(CornerSphere, "corner_sphere_%s" % (field), field = field)
-
-class AllData(YTFieldValuesTest):
-    def setup(self):
-        YTFieldValuesTest.setup(self)
-        self.data_object = self.pf.h.all_data()
-
-for field in field_list + particle_field_list:
-    create_test(AllData, "all_data_%s" % (field), field = field)


--- a/tests/answer_tests/projections.py	Wed May 18 09:37:36 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,16 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestGasDistribution
-from fields_to_test import field_list
-
-for axis in range(3):
-    for field in field_list:
-        create_test(TestProjection, "projection_test_%s_%s" % (axis, field),
-                    field = field, axis = axis)
-
-for field in field_list:
-    create_test(TestGasDistribution, "profile_density_test_%s" % field,
-                field_x = "Density", field_y = field)
-
-


--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/fields_to_test.py	Wed May 18 11:21:51 2011 -0400
@@ -0,0 +1,9 @@
+# We want to test several things.  We need to be able to run the 
+
+field_list = ["Density", "Temperature", "x-velocity", "y-velocity", "z-velocity",
+                  # Now some derived fields
+                  "Pressure", "SoundSpeed", "particle_density", "Entropy",
+                  # Ghost zones
+                  "AveragedDensity", "DivV"]
+
+particle_field_list = ["particle_position_x", "ParticleMassMsun"]


--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/hierarchy_consistency.py	Wed May 18 11:21:51 2011 -0400
@@ -0,0 +1,59 @@
+import numpy as na
+
+from yt.utilities.answer_testing.output_tests import \
+    YTStaticOutputTest, RegressionTestException
+from yt.funcs import ensure_list
+
+class HierarchyInconsistent(RegressionTestException):
+    pass
+
+class HierarchyConsistency(YTStaticOutputTest):
+    name = "hierarchy_consistency"
+    def run(self):
+        self.result = \
+            all( g in ensure_list(c.Parent) for g in self.pf.h.grids
+                                            for c in g.Children )
+
+    def compare(self, old_result):
+        if not(old_result and self.result): raise HierarchyInconsistent()
+
+class GridLocationsProperties(YTStaticOutputTest):
+    name = "level_consistency"
+    def run(self):
+        self.result = dict(grid_left_edge = self.pf.h.grid_left_edge,
+                           grid_right_edge = self.pf.h.grid_right_edge,
+                           grid_levels = self.pf.h.grid_levels,
+                           grid_particle_count = self.pf.h.grid_particle_count,
+                           grid_dimensions = self.pf.h.grid_dimensions)
+
+    def compare(self, old_result):
+        # We allow no difference between these values
+        self.compare_data_arrays(self.result, old_result, 0.0)
+
+class GridRelationshipsChanged(RegressionTestException):
+    pass
+
+class GridRelationships(YTStaticOutputTest):
+
+    name = "grid_relationships"
+    def run(self):
+        self.result = [ [p.id for p in ensure_list(g.Parent) if g.Parent is not None]
+                        for g in self.pf.h.grids ]
+
+    def compare(self, old_result):
+        if len(old_result) != len(self.result):
+            raise GridRelationshipsChanged()
+        for plist1, plist2 in zip(old_result, self.result):
+            if len(plist1) != len(plist2): raise GridRelationshipsChanged()
+            if not all( (p1 == p2 for p1, p2 in zip(plist1, plist2) ) ):
+                raise GridRelationshipsChanged()
+
+class GridGlobalIndices(YTStaticOutputTest):
+    name = "global_startindex"
+
+    def run(self):
+        self.result = na.array([g.get_global_startindex()
+                                for g in self.pf.h.grids])
+
+    def compare(self, old_result):
+        self.compare_array_delta(old_result, self.result, 0.0)


--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/object_field_values.py	Wed May 18 11:21:51 2011 -0400
@@ -0,0 +1,59 @@
+import hashlib
+import numpy as na
+
+from yt.utilities.answer_testing.output_tests import \
+    YTStaticOutputTest, RegressionTestException, create_test
+from yt.funcs import ensure_list
+from fields_to_test import field_list, particle_field_list
+
+class FieldHashesDontMatch(RegressionTestException):
+    pass
+
+class YTFieldValuesTest(YTStaticOutputTest):
+    def run(self):
+        vals = self.data_object[self.field].copy()
+        vals.sort()
+        self.result = hashlib.sha256(vals.tostring()).hexdigest()
+
+    def compare(self, old_result):
+        if self.result != old_result: raise FieldHashesDontMatch
+
+class CenteredSphere(YTFieldValuesTest):
+
+    def setup(self):
+        YTFieldValuesTest.setup(self)
+        center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+        width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
+        self.data_object = self.pf.h.sphere(center, width/0.25)
+
+for field in field_list + particle_field_list:
+    create_test(CenteredSphere, "centered_sphere_%s" % (field), field = field)
+
+class OffCenteredSphere(YTFieldValuesTest):
+
+    def setup(self):
+        YTFieldValuesTest.setup(self)
+        center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+        width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
+        self.data_object = self.pf.h.sphere(center - 0.25 * width, width/0.25)
+
+for field in field_list + particle_field_list:
+    create_test(OffCenteredSphere, "off_centered_sphere_%s" % (field), field = field)
+
+class CornerSphere(YTFieldValuesTest):
+
+    def setup(self):
+        YTFieldValuesTest.setup(self)
+        width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
+        self.data_object = self.pf.h.sphere(self.pf.domain_left_edge, width/0.25)
+
+for field in field_list + particle_field_list:
+    create_test(CornerSphere, "corner_sphere_%s" % (field), field = field)
+
+class AllData(YTFieldValuesTest):
+    def setup(self):
+        YTFieldValuesTest.setup(self)
+        self.data_object = self.pf.h.all_data()
+
+for field in field_list + particle_field_list:
+    create_test(AllData, "all_data_%s" % (field), field = field)


--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/projections.py	Wed May 18 11:21:51 2011 -0400
@@ -0,0 +1,16 @@
+from yt.utilities.answer_testing.output_tests import \
+    SingleOutputTest, create_test
+from yt.utilities.answer_testing.hydro_tests import \
+    TestProjection, TestGasDistribution
+from fields_to_test import field_list
+
+for axis in range(3):
+    for field in field_list:
+        create_test(TestProjection, "projection_test_%s_%s" % (axis, field),
+                    field = field, axis = axis)
+
+for field in field_list:
+    create_test(TestGasDistribution, "profile_density_test_%s" % field,
+                field_x = "Density", field_y = field)
+
+


--- a/tests/runall.py	Wed May 18 09:37:36 2011 -0400
+++ b/tests/runall.py	Wed May 18 11:21:51 2011 -0400
@@ -1,30 +1,97 @@
-"""
-Basic unit test suite to run all the tests at once. Hopefully it's
-clear how to append additional tests.
+from yt.config import ytcfg
+ytcfg["yt","loglevel"] = "50"
+ytcfg["yt","serialize"] = "False"
 
-You can run this using: 
+from yt.utilities.answer_testing.api import \
+    RegressionTestRunner, clear_registry, create_test, \
+    TestFieldStatistics, TestAllProjections, registry_entries
 
-$ python tests/runall.py
+from yt.utilities.command_line import get_yt_version
 
-This should be done from the root directory of the installation.
+from yt.mods import *
+import fnmatch
+import imp
+import optparse
+import itertools
 
-YT can either be installed globally, or the extensions build with:
+#
+# We assume all tests are to be run, unless explicitly given the name of a
+# single test or something that can be run through fnmatch.
+#
+# Keep in mind that we use a different nomenclature here than is used in the
+# Enzo testing system.  Our 'tests' are actually tests that are small and that
+# run relatively quickly on a single dataset; in Enzo's system, a 'test'
+# encompasses both the creation and the examination of data.  Here we assume
+# the data is kept constant.
+#
 
-$ python setup.py build_ext --inplace
-"""
+cwd = os.path.dirname(globals().get("__file__", os.getcwd()))
 
-import unittest
+def load_tests(iname, idir):
+    f, filename, desc = imp.find_module(iname, [idir])
+    tmod = imp.load_module(iname, f, filename, desc)
+    return tmod
 
-import test_lagos
-import test_raven
-import test_hdf5_reader
+def find_and_initialize_tests():
+    mapping = {}
+    for f in glob.glob(os.path.join(cwd,"*.py")):
+        clear_registry()
+        iname = os.path.basename(f[:-3])
+        try:
+            load_tests(iname, cwd)
+            mapping[iname] = registry_entries()
+            #print "Associating %s with" % (iname)
+            #print "\n    ".join(registry_entries())
+        except ImportError:
+            pass
+    return mapping
 
-def get_suite():
-    suite_l = unittest.defaultTestLoader.loadTestsFromModule(test_lagos)
-    suite_r = unittest.defaultTestLoader.loadTestsFromModule(test_raven)
-    suite_h = unittest.defaultTestLoader.loadTestsFromModule(test_hdf5_reader)
-    suite = unittest.TestSuite([suite_l, suite_r])
-    return suite
-
-if __name__ == '__main__':
-    unittest.main(defaultTest='get_suite')
+if __name__ == "__main__":
+    mapping = find_and_initialize_tests()
+    test_storage_directory = ytcfg.get("yt","test_storage_dir")
+    my_hash = get_yt_version()
+    parser = optparse.OptionParser()
+    parser.add_option("-f", "--parameter-file", dest="parameter_file",
+                      default = os.path.join(cwd, "DD0010/moving7_0010"),
+                      help = "The parameter file value to feed to 'load' to test against",
+                      )
+    parser.add_option("-l", "--list", dest="list_tests", action="store_true",
+                      default = False, help = "List all tests and then exit")
+    parser.add_option("-t", "--tests", dest="test_pattern", default="*",
+                      help = "The test name pattern to match.  Can include wildcards.")
+    parser.add_option("-o", "--output", dest="storage_dir",
+                      default=test_storage_directory,
+                      help = "Base directory for storing test output.")
+    parser.add_option("-c", "--compare", dest="compare_name",
+                      default=None,
+                      help = "The name against which we will compare")
+    parser.add_option("-n", "--name", dest="this_name",
+                      default=my_hash,
+                      help = "The name we'll call this set of tests")
+    opts, args = parser.parse_args()
+    if opts.list_tests:
+        print "\n    ".join(sorted(itertools.chain(*mapping.values())))
+        sys.exit(0)
+    pf = load(opts.parameter_file)
+    if pf is None:
+        print "Couldn't load the specified parameter file."
+        sys.exit(1)
+    # Now we modify our compare name and self name to include the pf.
+    compare_id = opts.compare_name
+    if compare_id is not None: compare_id += "_%s_%s" % (pf, pf._hash())
+    this_id = opts.this_name + "_%s_%s" % (pf, pf._hash())
+    rtr = RegressionTestRunner(this_id, compare_id,
+            results_path = opts.storage_dir,
+            compare_results_path = opts.storage_dir,
+            io_log = [opts.parameter_file])
+    tests_to_run = []
+    for m, vals in mapping.items():
+        print vals, opts.test_pattern
+        new_tests = fnmatch.filter(vals, opts.test_pattern)
+        if len(new_tests) == 0: continue
+        tests_to_run += new_tests
+        load_tests(m, cwd)
+    for test_name in sorted(tests_to_run):
+        rtr.run_test(test_name)
+    for test_name, result in sorted(rtr.passed_tests.items()):
+        print "TEST %s: %s" % (test_name, result)


--- a/tests/test_hdf5_reader.py	Wed May 18 09:37:36 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-import unittest, numpy, tables, sys, os, os.path
-sys.path.insert(0,".")
-
-from new import classobj
-
-from yt.lagos.HDF5LightReader import ReadData, ReadingError
-
-my_dtypes = ['short','int','longlong','ushort','uint','ulonglong',
-             'float','double']
-
-class HDF5LightTestIOBase(object):
-    def setUp(self):
-        self.rand_array = numpy.random.random(3000).reshape((30,10,10)).astype(self.dtype)
-    def test_check_io(self):
-        my_table = tables.openFile("testing_h5lt_io.h5","w")
-        my_table.createArray("/","%s" % (self.dtype),self.rand_array)
-        my_table.close()
-        recv_array = ReadData("testing_h5lt_io.h5", "/%s" % (self.dtype))
-        self.assert_(numpy.all(recv_array == self.rand_array))
-        self.assert_(recv_array.shape == self.rand_array.shape)
-        self.assertTrue(recv_array.flags.owndata)
-    def tearDown(self):
-        os.unlink("testing_h5lt_io.h5")
-
-for dtype in my_dtypes:
-    temp = classobj("TestingIO_%s" % (dtype),
-            (HDF5LightTestIOBase,unittest.TestCase), {'dtype':dtype})
-    exec('TestingIO_%s = temp' % dtype)
-
-class HDF5LightTestError(unittest.TestCase):
-    def test_no_file(self):
-        fn = "%s.h5" % int(numpy.random.random(1) * 1e6)
-        self.assertRaises(ReadingError, ReadData,fn,"/Nothing")
-    def test_no_dataset(self):
-        fn = "%s.h5" % int(numpy.random.random(1) * 1e6)
-        my_table = tables.openFile("testing_h5lt_io.h5","w")
-        my_table.close()
-        self.assertRaises(ReadingError, ReadData,fn,"/Nothing")
-    def tearDown(self):
-        if os.path.exists("testing_h5lt_io.h5"): os.unlink("testing_h5lt_io.h5")
-
-if __name__ == "__main__":
-    unittest.main()


--- a/tests/test_lagos.py	Wed May 18 09:37:36 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,698 +0,0 @@
-"""
-Test that we can get outputs, and interact with them in some primitive ways.
-"""
-
-# @TODO: Add unit test for deleting field from FieldInfo
-
-import unittest, glob, os.path, os, sys, StringIO
-
-print "Reporting from %s" % (os.getcwd())
-sys.path = ['.'] + sys.path
-
-from yt.config import ytcfg
-ytcfg["yt","LogLevel"] = '50'
-ytcfg["yt","logFile"] = "False"
-ytcfg["yt","suppressStreamLogging"] = "True"
-ytcfg["lagos","serialize"] = "False"
-
-import cPickle
-import numpy as na
-#from yt.utilities.exceptions import *
-from yt.data_objects.field_info_container import \
-    ValidationException
-from yt.mods import *
-from yt.analysis_modules.level_sets.api import *
-from yt.utilities.linear_interpolators import \
-    UnilinearFieldInterpolator, \
-    BilinearFieldInterpolator, \
-    TrilinearFieldInterpolator
-
-# The dataset used is located at:
-# http://yt.spacepope.org/DD0018.zip
-fn = "DD0010/moving7_0010"
-fn = os.path.join(os.path.dirname(__file__), fn)
-
-class LagosTestingBase:
-    def setUp(self):
-        self.OutputFile = load(fn)
-        self.hierarchy = self.OutputFile.hierarchy
-        self.v, self.c = self.hierarchy.find_max("Density")
-        gp = os.path.join(os.path.dirname(fn),"*.yt")
-        ytFiles = glob.glob(gp)
-        for i in ytFiles:
-            #print "Removing %s" % (i)
-            os.unlink(i)
-
-    def tearDown(self):
-        if hasattr(self,'data'): del self.data
-        if hasattr(self,'region'): del self.region
-        if hasattr(self,'ind_to_get'): del self.ind_to_get
-        del self.OutputFile, self.hierarchy
-        
-class TestParameterFileStore(unittest.TestCase):
-    def setUp(self):
-        self.original = (yt.config.ytcfg.get("yt","ParameterFileStore"),
-                         yt.config.ytcfg.get("lagos","serialize"))
-        ytcfg['yt','ParameterFileStore'] = "testing.csv"
-        pfs = ParameterFileStore()
-        os.unlink(pfs._get_db_name())
-        self.pfs = ParameterFileStore() # __init__ gets called again
-        ytcfg['lagos', 'serialize'] = "True"
-
-    def testCacheFile(self):
-        pf1 = load(fn)
-        pf2 = self.pfs.get_pf_hash(pf1._hash())
-        self.assertTrue(pf1 is pf2)
-
-    def testGrabFile(self):
-        pf1 = load(fn)
-        hash = pf1._hash()
-        del pf1
-        pf2 = self.pfs.get_pf_hash(hash)
-        self.assertTrue(hash == pf2._hash())
-
-    def testGetCurrentTimeID(self):
-        pf1 = load(fn)
-        hash = pf1._hash()
-        ctid = pf1["CurrentTimeIdentifier"]
-        del pf1
-        pf2 = self.pfs.get_pf_ctid(ctid)
-        self.assertTrue(hash == pf2._hash())
-
-    def tearDown(self):
-        os.unlink(self.pfs._get_db_name())
-        ytcfg['yt', 'ParameterFileStore'] = self.original[0]
-        ytcfg['lagos', 'serialize'] = self.original[1]
-        self.pfs.__init__()
-
-class TestHierarchy(LagosTestingBase, unittest.TestCase):
-    def testGetHierarchy(self):
-        self.assert_(self.OutputFile.hierarchy != None)
-
-    def testGetUnits(self):
-        self.assert_(self.OutputFile["cm"] != 1.0)
-
-    def testGetSmallestDx(self):
-        self.assertAlmostEqual(self.hierarchy.get_smallest_dx(),
-                               0.00048828125, 7)
-
-    def testGetNumberOfGrids(self):
-        self.assertEqual(self.hierarchy.num_grids, len(self.hierarchy.grids))
-        self.assertEqual(self.hierarchy.num_grids, 10)
-
-    def testChildrenOfRootGrid(self):
-        for child in self.hierarchy.grids[0].Children:
-            self.assert_(child.Parent.id == self.hierarchy.grids[0].id)
-
-    def testGetSelectLevels(self):
-        for level in range(self.hierarchy.max_level+1):
-            for grid in self.hierarchy.select_grids(level):
-                self.assert_(grid.Level == level)
-
-    def testPrintStats(self):
-        a = sys.stdout
-        sys.stdout = StringIO.StringIO()
-        try:
-            self.hierarchy.print_stats()
-            worked = True
-        except:
-            worked = False
-        sys.stdout = a
-        self.assert_(worked)
-
-    def testDataTypes(self):
-        r=self.hierarchy.region(
-                     [0.5,0.5,0.5],[0.0, 0.0, 0.0],
-                     [1.0, 1.0, 1.0],
-                     ["CellMass","Temperature"])
-            # Testing multiple fields fed in
-        s=self.hierarchy.sphere(
-                     [0.5,0.5,0.5],2.0,
-                     ["CellMass","Temperature"])
-        ms = s["CellMass"].sum() # Testing adding new field transparently
-        mr = r["CellMass"].sum() # Testing adding new field transparently
-        self.assertEqual(ms,mr)  # Asserting equality between the two
-
-    def testProjectionCorrectnessMultipleFields(self):
-        p = self.hierarchy.proj(0,["Density","Ones"], weight=None) # Unweighted
-        self.assertTrue(na.all(p["Ones"] == 1.0))
-
-    def testProjectionMakingMultipleFields(self):
-        p = self.hierarchy.proj(0,["Density","Temperature","Ones"],weight_field="Ones") # Unweighted
-        # One for each field, pdx, pdy, px, py, and one for the weight
-        self.assertEqual(len(p.data.keys()), 8)
-
-    def testProjectionSuccess(self):
-        p = self.hierarchy.proj(0,"Density") # Unweighted
-        p = self.hierarchy.proj(1,"Temperature","Density") # Weighted
-        p = self.hierarchy.proj(2,"Entropy") # Derived field
-
-    def testUnweightedProjectionCorrectness(self):
-        # Now we test that we get good answers
-        for axis in range(3):
-            p = self.hierarchy.proj(axis, "Ones") # Derived field
-            self.assertTrue(na.all(p["Ones"] == 1.0))
-            # Regardless of weighting, we want ones back
-
-    def testWeightedProjectionCorrectness(self):
-        # Now we test that we get good answers
-        for axis in range(3):
-            # Regardless of weighting, we want ones back
-            p = self.hierarchy.proj(axis, "Ones", "Density")
-            self.assertTrue(na.all(p["Ones"] == 1.0))
-
-# Now we test each datatype in turn
-
-def _returnFieldFunction(field):
-    def field_function(self):
-        try:
-            self.data[field.name]
-            if not field.particle_type and not field.vector_field and \
-                self.data[field.name].size > 1:
-                self.assertEqual(na.product(self.data["Density"].shape),
-                                 na.product(self.data[field.name].shape))
-            del self.data[field.name]
-        except ValidationException:
-            pass
-    return field_function
-
-def _returnProfile1DFunction(field, weight, accumulation, lazy):
-    def add_field_function(self):
-        self.data.set_field_parameter("center",[.5,.5,.5])
-        profile = BinnedProfile1D(
-            self.data, 8, "RadiusCode", 0, 1.0, False, lazy)
-        profile.add_fields(field, weight=weight, accumulation=accumulation)
-    return add_field_function
-
-def _returnProfile2DFunction(field, weight, accumulation, lazy):
-    def add_field_function(self):
-        self.data.set_field_parameter("center",[.5,.5,.5])
-        cv_min = self.hierarchy.get_smallest_dx()**3.0
-        cv_max = 1.0 / max(self.OutputFile["TopGridDimensions"])
-        profile = BinnedProfile2D(self.data,
-                    8, "RadiusCode", 1e-3, 1.0, True,
-                    8, "CellVolumeCode", cv_min, cv_max, True, lazy)
-        profile.add_fields(field, weight=weight, accumulation=accumulation)
-    return add_field_function
-
-class DataTypeTestingBase:
-    def setUp(self):
-        LagosTestingBase.setUp(self)
-
-    def testRepr(self):
-        self.assertTrue(
-            ("%s" % self.data).startswith(self.data.__class__.__name__))
-
-class Data3DBase:
-    def testProfileAccumulateMass(self):
-        self.data.set_field_parameter("center",[0.5]*3)
-        profile = BinnedProfile1D(self.data, 8, "RadiusCode", 0, 1.0,
-                                           False, True)
-        profile.add_fields("CellMassMsun", weight=None, accumulation=True)
-        v1 = profile["CellMassMsun"].max()
-        v2 = self.data["CellMassMsun"].sum()
-        v2 = na.abs(1.0 - v2/v1)
-        self.assertAlmostEqual(v2, 0.0, 7)
-
-    def testExtractConnectedSetsNoCache(self):
-        mi = self.data["Density"].min() * 2.0
-        ma = self.data["Density"].max() * 0.99
-        cons, contours = self.data.extract_connected_sets(
-            "Density", 2, mi, ma)
-        self.assertEqual(len(contours), 2) # number of contour levels
-        self.assertEqual(len(contours[0]), 2)
-        self.assertEqual(len(contours[1]), 1)
-
-    def testExtractConnectedSetsCache(self):
-        mi = self.data["Density"].min() * 2.0
-        ma = self.data["Density"].max() * 0.99
-        cons, contours = self.data.extract_connected_sets(
-            "Density", 2, mi, ma, cache=True)
-        self.assertEqual(len(contours), 2) # number of contour levels
-        self.assertEqual(len(contours[0]), 2)
-        self.assertEqual(len(contours[1]), 1)
-
-    def testContoursCache(self):
-        cid = identify_contours(self.data, "Density",
-                self.data["Density"].min()*2.00,
-                self.data["Density"].max()*1.01)
-        self.assertEqual(len(cid), 2)
-
-    def testContoursObtain(self):
-        cid = identify_contours(self.data, "Density",
-                self.data["Density"].min()*2.00, self.data["Density"].max()*1.01)
-        self.assertEqual(len(cid), 2)
-
-    def testContoursValidityMax(self):
-        v1 = self.data["Density"].max()*0.99
-        v2 = self.data["Density"].max()*1.01
-        cid = identify_contours(self.data, "Density", v1, v2)
-        self.assertTrue(na.all(v1 < self.data["Density"][cid[0]])
-                    and na.all(v2 > self.data["Density"][cid[0]]))
-        self.assertEqual(len(cid), 1)
-
-    def testContoursValidityMin(self):
-        v1 = self.data["Density"].min()*0.99
-        v2 = self.data["Density"].min()*1.01
-        cid = identify_contours(self.data, "Density", v1, v2)
-        self.assertTrue(na.all(v1 < self.data["Density"][cid[0]])
-                    and na.all(v2 > self.data["Density"][cid[0]]))
-        self.assertEqual(len(cid), 3)
-
-    def testPickle(self):
-        ps = cPickle.dumps(self.data)
-        pf, obj = cPickle.loads(ps)
-        self.assertEqual(obj["CellMassMsun"].sum(), self.data["CellMassMsun"].sum())
-
-for field_name in FieldInfo:
-    if field_name.startswith("PT"): continue
-    field = FieldInfo[field_name]
-    setattr(DataTypeTestingBase, "test%s" % field.name, _returnFieldFunction(field))
-
-field = "Temperature"
-for weight in [None, "CellMassMsun"]:
-    for lazy in [True, False]:
-        for accumulation in [True, False]:
-            func = _returnProfile1DFunction(field, weight, accumulation, lazy)
-            name = "test%sProfile1D_w%s_l%s_a%s" % (field,
-                                                weight, lazy,
-                                                accumulation)
-            setattr(Data3DBase, name, func)
-
-for weight in [None, "CellMassMsun"]:
-    for lazy in [True, False]:
-        for accumulation_x in [True, False]:
-            for accumulation_y in [True, False]:
-                acc = (accumulation_x, accumulation_y)
-                func = _returnProfile2DFunction(field, weight, acc, lazy)
-                name = "test%sProfile2D_w%s_l%s_a%s_a%s" % (field,
-                                                        weight, lazy,
-                                                        accumulation_x,
-                                                        accumulation_y)
-                setattr(Data3DBase, name, func)
-
-class TestSmoothedCoveringGrid(LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        LagosTestingBase.setUp(self)
-
-    def testAllCover(self):
-        DIMS = 32
-        for i in range(self.hierarchy.max_level+1):
-            dx = (DIMS*2**i)**-1
-            LE = na.array([0.5,0.5,0.5])-(dx*DIMS/2.0)
-            RE = na.array([0.5,0.5,0.5])+(dx*DIMS/2.0)
-            cg = self.hierarchy.smoothed_covering_grid(
-                    level=i, left_edge=LE, right_edge=RE,
-                    dims=[DIMS]*3, fields=["Density"])
-            self.assertFalse(na.any(na.isnan(cg["Density"])))
-            self.assertFalse(na.any(cg["Density"]==-999))
-
-    def testAddField(self):
-        DIMS = 64
-        i = 5
-        dx = (DIMS*2**i)**-1
-        LE = na.array([0.5,0.5,0.5])-(dx*DIMS/2.0)
-        RE = na.array([0.5,0.5,0.5])+(dx*DIMS/2.0)
-        cg = self.hierarchy.smoothed_covering_grid(
-                level=i, left_edge=LE, right_edge=RE,
-                dims=[DIMS]*3, fields=["Density"])
-        self.assertFalse(na.any(na.isnan(cg["Temperature"])))
-        self.assertFalse(na.any(cg["Temperature"]==-999))
-
-class TestIntSmoothedCoveringGrid(LagosTestingBase, unittest.TestCase):
-    def setUp(self):        
-        LagosTestingBase.setUp(self)
-
-    def testCoordinates(self):
-        # We skip the first grid because it has ghost zones on all sides
-        for g in self.hierarchy.grids[1:]:
-            LE = g.LeftEdge - g.dds
-            level = g.Level
-            dims = g.ActiveDimensions + 2
-            g1 = self.hierarchy.si_covering_grid(level, LE, dims)
-            g2 = g.retrieve_ghost_zones(1, ["x","y","z"], smoothed=False)
-            for field in 'xyz':
-                diff = na.abs((g1[field] - g2[field])/(g1[field] + g2[field]))
-                self.assertAlmostEqual(diff.max(), 0.0, 1e-14)
-
-class TestDataCube(LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        LagosTestingBase.setUp(self)
-
-    def testNoGhost(self):
-        DW = self.OutputFile["DomainRightEdge"] \
-           - self.OutputFile["DomainLeftEdge"]
-        for g in self.hierarchy.grids:
-            cube = g.retrieve_ghost_zones(0, "Density")
-            self.assertTrue(na.all(cube["Density"] == g["Density"]))
-            cube["Density"] = na.arange(cube["Density"].size).reshape(cube["Density"].shape)
-            cube.flush_data(field="Density")
-            self.assertTrue(na.all(g["Density"] == cube["Density"]))
-
-    def testOffsetDomain(self):
-        DW = self.OutputFile["DomainRightEdge"] \
-           - self.OutputFile["DomainLeftEdge"]
-        for g in self.hierarchy.grids:
-            cube = self.hierarchy.covering_grid(g.Level,
-                g.LeftEdge+DW, g.ActiveDimensions)
-            self.assertTrue(na.all(g["Density"] == cube["Density"]))
-
-    def testTwoGhost(self):
-        for g in self.hierarchy.grids:
-            cube = g.retrieve_ghost_zones(2, "Density")
-
-    def testMultipleFields(self):
-        for g in self.hierarchy.grids:
-            cube1 = g.retrieve_ghost_zones(0, ["Density","Temperature"])
-            self.assertTrue(na.all(cube1["Density"] == g["Density"]))
-            self.assertTrue(na.all(cube1["Temperature"] == g["Temperature"]))
-            cube2a = g.retrieve_ghost_zones(0, "Density")
-            cube2b = g.retrieve_ghost_zones(0, "Temperature")
-            self.assertTrue(na.all(cube1["Density"] == cube2a["Density"]))
-            self.assertTrue(na.all(cube1["Temperature"] == cube2b["Temperature"]))
-    
-    def testFlushBackToGrids(self):
-        ml = self.hierarchy.max_level
-        cg = self.hierarchy.covering_grid(2, [0.0]*3, [64,64,64])
-        cg["Ones"] *= 2.0
-        cg.flush_data(field="Ones")
-        for g in na.concatenate([self.hierarchy.select_grids(i) for i in range(3)]):
-            self.assertEqual(g["Ones"].max(), 2.0)
-            self.assertEqual(g["Ones"][g["Ones"]*g.child_mask>0].min(), 2.0)
-
-    def testFlushBackToNewCover(self):
-        ml = self.hierarchy.max_level
-        cg = self.hierarchy.covering_grid(2, [0.0]*3, [64,64,64])
-        cg["tempContours"] = cg["Ones"] * 2.0
-        cg.flush_data(field="tempContours")
-        cg2 = self.hierarchy.covering_grid(2, [0.0]*3, [64,64,64])
-        self.assertTrue(na.all(cg["tempContours"] == cg2["tempContours"]))
-
-    def testRawFlushBack(self):
-        ml = self.hierarchy.max_level
-        cg = self.hierarchy.covering_grid(2, [0.0]*3, [64,64,64])
-        cg["DensityNew"] = cg["Density"] * 2.111
-        cg.flush_data(field="DensityNew")
-        for g in na.concatenate([self.hierarchy.select_grids(i) for i in range(3)]):
-            ni = g["DensityNew"] > 0
-            min_diff = (g["DensityNew"][ni]/g["Density"][ni]).max()
-            max_diff = (g["DensityNew"][ni]/g["Density"][ni]).min()
-            min_diff_i = na.argmin(g["DensityNew"][ni]/g["Density"][ni])
-            max_diff_i = na.argmax(g["DensityNew"][ni]/g["Density"][ni])
-            self.assertAlmostEqual(min_diff, 2.111, 5)
-            self.assertAlmostEqual(max_diff, 2.111, 5)
-
-    def testAllCover(self):
-        cg = self.hierarchy.covering_grid(1, [0.0]*3, [32,32,32])
-        mi, ma = 1e30, -1e30
-        for g in na.concatenate([self.hierarchy.select_grids(i) for i in range(2)]):
-            ma = max(ma, g["Density"].max())
-            mi = min(mi, g["Density"].min())
-        self.assertEqual(cg["Density"].max(), ma)
-        self.assertEqual(cg["Density"].min(), mi)
-
-    def testCellVolume(self):
-        cg = self.hierarchy.covering_grid(2, [0.0]*3, [64,64,64])
-        self.assertEqual(na.unique(cg["CellVolume"]).size, 1)
-
-
-class TestDiskDataType(Data3DBase, DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data=self.hierarchy.disk(
-                     [0.5,0.5,0.5],[0.2, 0.1, 0.5],1.0,1.0)
-
-class TestRegionDataType(Data3DBase, DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data=self.hierarchy.region(
-                     [0.5,0.5,0.5],[0.0, 0.0, 0.0],
-                     [1.0, 1.0, 1.0])
-    def testVolume(self):
-        vol = self.data["CellVolume"].sum() / self.data.convert("cm")**3.0
-        self.assertAlmostEqual(vol,1.0,7)
-
-class TestRegionStrictDataType(Data3DBase, DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data=self.hierarchy.region_strict(
-                     [0.5,0.5,0.5],[0.0, 0.0, 0.0],
-                     [1.0, 1.0, 1.0])
-    def testVolume(self):
-        vol = self.data["CellVolume"].sum() / self.data.convert("cm")**3.0
-        self.assertAlmostEqual(vol,1.0,7)
-
-class TestPeriodicRegionDataType(Data3DBase, DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data=self.hierarchy.periodic_region(
-                     [0.5,0.5,0.5],[0.5, 0.5, 0.5],
-                     [1.5,1.5,1.5])
-    def testVolume(self):
-        vol = self.data["CellVolume"].sum() / self.data.convert("cm")**3.0
-        self.assertAlmostEqual(vol,1.0,7)
-
-class TestPeriodicRegionStrictDataType(Data3DBase,
-            DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data=self.hierarchy.periodic_region_strict(
-                     [0.5,0.5,0.5],[0.5, 0.5, 0.5],
-                     [1.5,1.5,1.5])
-    def testVolume(self):
-        vol = self.data["CellVolume"].sum() / self.data.convert("cm")**3.0
-        self.assertAlmostEqual(vol,1.0,7)
-
-class TestSphereDataType(Data3DBase, DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data=self.hierarchy.sphere([0.5,0.5,0.5],1.0)
-    def testVolume(self):
-        vol = self.data["CellVolume"].sum() / self.data.convert("cm")**3.0
-        self.assertAlmostEqual(vol,1.0,7)
-
-class TestSliceDataType(DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data = self.hierarchy.slice(0,0.5, center=[0.5, 0.5, 0.5])
-
-class TestCuttingPlane(DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data = self.hierarchy.cutting([0.1,0.3,0.4], [0.5,0.5,0.5], ["Density"])
-    def testAxisVectors(self):
-        x_v = self.data._x_vec
-        y_v = self.data._y_vec
-        z_v = self.data._norm_vec
-        self.assertAlmostEqual(na.dot(x_v, y_v), 0.0, 7)
-        self.assertAlmostEqual(na.dot(x_v, z_v), 0.0, 7)
-        self.assertAlmostEqual(na.dot(y_v, z_v), 0.0, 7)
-    def testZHeight(self):
-        self.assertTrue(na.all(self.data['pz'] < self.data['dx']))
-
-class TestGridDataType(DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
-    def setUp(self):
-        DataTypeTestingBase.setUp(self)
-        self.data = self.hierarchy.grids[0]
-
-class TestExtractFromSphere(TestSphereDataType):
-    def setUp(self):
-        TestSphereDataType.setUp(self)
-        self.region = self.data
-        self.ind_to_get = na.where(self.region["Temperature"]>500)
-        self.data = self.region.extract_region(self.ind_to_get)
-
-    def testNumberOfEntries(self):
-        self.assertEqual(self.ind_to_get[0].shape,
-                        self.data["Density"].shape)
-    def testVolume(self):
-        self.ind_to_get = na.where(self.region["CellVolume"]>0.0)
-        vol = self.region.extract_region(self.ind_to_get)["CellVolume"].sum() \
-            / self.data.convert("cm")**3.0
-        self.assertAlmostEqual(vol,1.0,7)
-
-    def testJoin(self):
-        new_region = self.region.extract_region(
-                self.region["Temperature"]<=500)
-        joined_region = self.data.join(new_region)
-        self.assertEqual(joined_region["CellMassMsun"].sum(),
-                         self.region["CellMassMsun"].sum())
-
-    # I have verified that the *old* version of the code overconnected
-    # contours outside.  This are overridden to make sure this does not
-    # happen again!
-    def testContoursObtain(self):
-        cid = identify_contours(self.data, "Density",
-                self.data["Density"].min()*2.00, self.data["Density"].max()*1.01)
-        self.assertEqual(len(cid), 10)
-
-    def testContoursCache(self):
-        cid = identify_contours(self.data, "Density",
-                self.data["Density"].min()*2.00,
-                self.data["Density"].max()*1.01)
-        self.assertEqual(len(cid), 10)
-
-    def testExtractConnectedSetsNoCache(self):
-        mi = self.data["Density"].min() * 2.0
-        ma = self.data["Density"].max() * 0.99
-        cons, contours = self.data.extract_connected_sets(
-            "Density", 2, mi, ma)
-        self.assertEqual(len(contours), 2) # number of contour levels
-        self.assertEqual(len(contours[0]), 10)
-        self.assertEqual(len(contours[1]), 1)
-
-    def testExtractConnectedSetsCache(self):
-        mi = self.data["Density"].min() * 2.0
-        ma = self.data["Density"].max() * 0.99
-        cons, contours = self.data.extract_connected_sets(
-            "Density", 2, mi, ma, cache=True)
-        self.assertEqual(len(contours), 2) # number of contour levels
-        self.assertEqual(len(contours[0]), 10)
-        self.assertEqual(len(contours[1]), 1)
-
-class TestExtractFromRegion(TestRegionDataType):
-    def setUp(self):
-        TestRegionDataType.setUp(self)
-        self.region = self.data
-        self.ind_to_get = na.where(self.region["Temperature"]>500)
-        self.data = self.region.extract_region(self.ind_to_get)
-    def testNumberOfEntries(self):
-        self.assertEqual(self.ind_to_get[0].shape,
-                        self.data["Density"].shape)
-    def testVolume(self):
-        ind_to_get = na.where(self.region["CellVolume"]>0.0)
-        vol = self.region.extract_region(ind_to_get)["CellVolume"].sum() \
-            / self.data.convert("cm")**3.0
-        self.assertAlmostEqual(vol,1.0,7)
-
-    def testJoin(self):
-        new_region = self.region.extract_region(
-                self.region["Temperature"]<=500)
-        joined_region = self.data.join(new_region)
-        self.assertEqual(joined_region["CellMassMsun"].sum(),
-                         self.region["CellMassMsun"].sum())
-
-    # I have verified that the *old* version of the code overconnected
-    # contours outside.  This are overridden to make sure this does not
-    # happen again!
-    def testContoursObtain(self):
-        cid = identify_contours(self.data, "Density",
-                self.data["Density"].min()*2.00, self.data["Density"].max()*1.01)
-        self.assertEqual(len(cid), 10)
-
-    def testContoursCache(self):
-        cid = identify_contours(self.data, "Density",
-                self.data["Density"].min()*2.00,
-                self.data["Density"].max()*1.01)
-        self.assertEqual(len(cid), 10)
-
-    def testExtractConnectedSetsNoCache(self):
-        mi = self.data["Density"].min() * 2.0
-        ma = self.data["Density"].max() * 0.99
-        cons, contours = self.data.extract_connected_sets(
-            "Density", 2, mi, ma)
-        self.assertEqual(len(contours), 2) # number of contour levels
-        self.assertEqual(len(contours[0]), 10)
-        self.assertEqual(len(contours[1]), 1)
-
-    def testExtractConnectedSetsCache(self):
-        mi = self.data["Density"].min() * 2.0
-        ma = self.data["Density"].max() * 0.99
-        cons, contours = self.data.extract_connected_sets(
-            "Density", 2, mi, ma, cache=True)
-        self.assertEqual(len(contours), 2) # number of contour levels
-        self.assertEqual(len(contours[0]), 10)
-        self.assertEqual(len(contours[1]), 1)
-
-
-class TestUnilinearInterpolator(unittest.TestCase):
-    def setUp(self):
-        x0, x1 = na.random.uniform(-100,100,2)
-        nstep_x = na.random.randint(10,200)
-        nvals = na.random.randint(100,1000)
-
-        table = na.mgrid[x0:x1:nstep_x*1j]
-
-        self.ufi_x = UnilinearFieldInterpolator(table,
-                      (x0,x1),'x')
-        self.my_dict = {}
-        self.my_dict['x'] = na.random.uniform(x0,x1,nvals)
-
-    def testXInt(self):
-        nv = self.ufi_x(self.my_dict)
-        for i,v in enumerate(nv):
-            self.assertAlmostEqual(v, self.my_dict['x'][i], 5)
-
-class TestBilinearInterpolator(unittest.TestCase):
-    def setUp(self):
-        x0, x1 = na.random.uniform(-100,100,2)
-        y0, y1 = na.random.uniform(-100,100,2)
-        nstep_x = na.random.randint(10,200)
-        nstep_y = na.random.randint(10,200)
-        nvals = na.random.randint(100,1000)
-
-        table = na.mgrid[x0:x1:nstep_x*1j,
-                         y0:y1:nstep_y*1j]
-
-        self.bfi_x = BilinearFieldInterpolator(table[0,...],
-                      (x0,x1,y0,y1),['x','y'])
-        self.bfi_y = BilinearFieldInterpolator(table[1,...],
-                      (x0,x1,y0,y1),['x','y'])
-        self.my_dict = {}
-        self.my_dict['x'] = na.random.uniform(x0,x1,nvals)
-        self.my_dict['y'] = na.random.uniform(y0,y1,nvals)
-
-    def testXInt(self):
-        nv = self.bfi_x(self.my_dict)
-        for i,v in enumerate(nv):
-            self.assertAlmostEqual(v, self.my_dict['x'][i], 5)
-
-    def testYInt(self):
-        nv = self.bfi_y(self.my_dict)
-        for i,v in enumerate(nv):
-            self.assertAlmostEqual(v, self.my_dict['y'][i], 5)
-
-class TestTrilinearInterpolator(unittest.TestCase):
-    def setUp(self):
-        x0, x1 = na.random.uniform(-100,100,2)
-        y0, y1 = na.random.uniform(-100,100,2)
-        z0, z1 = na.random.uniform(-100,100,2)
-        nstep_x = na.random.randint(10,200)
-        nstep_y = na.random.randint(10,200)
-        nstep_z = na.random.randint(10,200)
-        nvals = na.random.randint(100,1000)
-
-        table = na.mgrid[x0:x1:nstep_x*1j,
-                         y0:y1:nstep_y*1j,
-                         z0:z1:nstep_z*1j]
-
-        self.tfi_x = TrilinearFieldInterpolator(table[0,...],
-                      (x0,x1,y0,y1,z0,z1),['x','y','z'])
-        self.tfi_y = TrilinearFieldInterpolator(table[1,...],
-                      (x0,x1,y0,y1,z0,z1),['x','y','z'])
-        self.tfi_z = TrilinearFieldInterpolator(table[2,...],
-                      (x0,x1,y0,y1,z0,z1),['x','y','z'])
-        self.my_dict = {}
-        self.my_dict['x'] = na.random.uniform(x0,x1,nvals)
-        self.my_dict['y'] = na.random.uniform(y0,y1,nvals)
-        self.my_dict['z'] = na.random.uniform(z0,z1,nvals)
-
-    def testXInt(self):
-        nv = self.tfi_x(self.my_dict)
-        for i,v in enumerate(nv):
-            self.assertAlmostEqual(v, self.my_dict['x'][i], 5)
-
-    def testYInt(self):
-        nv = self.tfi_y(self.my_dict)
-        for i,v in enumerate(nv):
-            self.assertAlmostEqual(v, self.my_dict['y'][i], 5)
-
-    def testZInt(self):
-        nv = self.tfi_z(self.my_dict)
-        for i,v in enumerate(nv):
-            self.assertAlmostEqual(v, self.my_dict['z'][i], 5)
-
-
-
-if __name__ == "__main__":
-    unittest.main()


--- a/tests/test_raven.py	Wed May 18 09:37:36 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,175 +0,0 @@
-"""
-Test that we can make plots
-"""
-
-import unittest, glob, os.path, os, sys, StringIO
-
-print "Reporting from %s" % (os.getcwd())
-sys.path = ['.'] + sys.path
-
-from yt.config import ytcfg
-ytcfg["yt","LogLevel"] = '50'
-ytcfg["yt","logFile"] = "False"
-ytcfg["yt","suppressStreamLogging"] = "True"
-ytcfg["lagos","serialize"] = "False"
-
-import numpy as na
-import yt.lagos
-import yt.raven
-
-# The dataset used is located at:
-# http://yt.spacepope.org/DD0018.zip
-fn = "DD0010/moving7_0010"
-fn = os.path.join(os.path.dirname(__file__),fn)
-
-class RavenTestingBase:
-    def setUp(self):
-        self.OutputFile = yt.lagos.EnzoStaticOutput(fn)
-        self.hierarchy = self.OutputFile.hierarchy
-        self.pc = yt.raven.PlotCollection(self.OutputFile)
-        self.v, self.c = self.hierarchy.find_max("Density")
-        gp = os.path.join(os.path.dirname(fn),"*.yt")
-        ytFiles = glob.glob(gp)
-        for i in ytFiles:
-            os.unlink(i)
-        self.localSetup()
-
-    def tearDown(self):
-        if hasattr(self,'data'): del self.data
-        if hasattr(self,'region'): del self.region
-        if hasattr(self,'ind_to_get'): del self.ind_to_get
-        if hasattr(self,'pc'): del self.pc
-        del self.OutputFile, self.hierarchy
-        
-    def DoSave(self):
-        fns=self.pc.save("test")
-        for fn in fns:
-            os.unlink(fn)
-
-    def _testSlice(self):
-        self.pc.add_slice("Density",0)
-        self.pc.plots[-1].switch_z("CellMass")
-        self.pc.set_width(0.5,'1')
-        self.pc.set_zlim(1,1000)
-        self.pc.set_cmap("hot")
-        self.DoSave()
-
-    def _testProjection(self):
-        self.pc.add_projection("Temperature",1,weight_field="Density")
-        self.pc.set_width(0.5,'1')
-        self.pc.set_zlim(1,1000)
-        self.pc.set_cmap("hot")
-        self.DoSave()
-
-    def _testThreePhaseSphere(self):
-        print "Testing ThreePhase"
-        self.pc.add_phase_sphere(1.0,'1',["Density","Temperature","Density"],center=self.c)
-        self.DoSave()
-
-    def _testCallbacksOnSlices(self):
-        # We test a couple things here
-        # Add callbacks, then remove one, then do the plot-saving
-        # Both types of callback should be called here
-        for ax in range(3):
-            self.pc.add_slice("Density", 0)
-            x,y = yt.raven.axis_labels[ax]
-            v1 = "%s-velocity" % (x)
-            v2 = "%s-velocity" % (y)
-            qi = self.pc.plots[-1].add_callback(yt.raven.QuiverCallback(v1,v2,ax,32))
-            ti = self.pc.plots[-1].add_callback(yt.raven.ContourCallback("Temperature",
-                                               ncont=3, factor=10))
-            gi = self.pc.plots[-1].add_callback(yt.raven.ContourCallback("Gas_Energy",
-                                               ncont=3, factor=10))
-            self.pc.plots[-1].remove_callback(gi)
-        self.DoSave()
-
-class PlotTestingBase(RavenTestingBase):
-    def test_set_xlim(self):
-        self.pc.set_xlim(0.25,0.75)
-        self.DoSave()
-
-    def test_set_ylim(self):
-        self.pc.set_ylim(0.25,0.75)
-        self.DoSave()
-
-    def test_autoscale(self):
-        # verify autoscale changed
-        self.pc.autoscale()
-        self.DoSave()
-
-    def test_set_zlim(self):
-        self.pc.set_zlim(0.5, 1.0)
-        self.DoSave()
-
-    def test_set_lim(self):
-        self.pc.set_lim((0.25,0.75,0.25,0.75))
-        self.DoSave()
-
-    def test_set_width(self):
-        self.pc.set_width(0.25,'1')
-        self.DoSave()
-
-    def test_set_cmap(self):
-        self.pc.set_cmap("kamae")
-        self.DoSave()
-        self.pc.set_cmap("jet")
-        self.DoSave()
-
-    def test_switch_field(self):
-        for field in ["Temperature","x-velocity"]:
-            self.pc.switch_field(field)
-            # Check if the logging is set correctly
-            self.DoSave()
-
-    def test_clear_plots(self):
-        self.pc.clear_plots()
-        self.assertTrue(len(self.pc.plots) == 0)
-
-    def test_set_label(self):
-        for p in self.pc.plots: p.set_label(r"$\rm{Hi}$")
-        self.DoSave()
-        for p in self.pc.plots: p.set_label("Hi!")
-        self.DoSave()
-
-    def test_set_logfield(self):
-        for p in self.pc.plots: p.set_log_field(False)
-        self.DoSave()
-        for p in self.pc.plots: p.set_log_field(False)
-        self.DoSave()
-
-    def test_save(self):
-        self.DoSave()
-
-class TestSlices(PlotTestingBase, unittest.TestCase):
-    def localSetup(self):
-        self.pc.add_slice("Density",0)
-        self.pc.add_slice("Density",1)
-        self.pc.add_slice("Density",2)
-
-class TestSphere(PlotTestingBase, unittest.TestCase):
-    def localSetup(self):
-        self.pc.add_phase_sphere(1.0,'1',
-                ["Density","TotalEnergy","y-velocity"])
-
-class TestPhaseObject(PlotTestingBase, unittest.TestCase):
-    def localSetup(self):
-        obj = self.hierarchy.region([0.5]*3, [0.0]*3, [1.0]*3)
-        self.pc.add_phase_object(obj, ["Density","TotalEnergy","y-velocity"])
-
-class TestProjection(PlotTestingBase, unittest.TestCase):
-    def localSetup(self):
-        self.pc.add_projection("Density", 0)
-        self.pc.add_projection("Temperature", 1)
-        self.pc.add_projection("x-velocity", 2, weight_field="Density")
-
-class TestMixProjectionSlice(PlotTestingBase, unittest.TestCase):
-    def localSetup(self):
-        self.pc.add_projection("Density",0)
-        self.pc.add_slice("Density",0)
-
-class TestCuttingPlane(PlotTestingBase, unittest.TestCase):
-    def localSetup(self):
-        self.pc.add_cutting_plane("Density", [0.1,0.2,0.3])
-
-if __name__ == "__main__":
-    unittest.main()


--- a/yt/config.py	Wed May 18 09:37:36 2011 -0400
+++ b/yt/config.py	Wed May 18 11:21:51 2011 -0400
@@ -47,6 +47,7 @@
     pluginfilename = 'my_plugins.py',
     parallel_traceback = 'False',
     pasteboard_repo = '',
+    test_storage_dir = '/does/not/exist',
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten


--- a/yt/utilities/answer_testing/api.py	Wed May 18 09:37:36 2011 -0400
+++ b/yt/utilities/answer_testing/api.py	Wed May 18 11:21:51 2011 -0400
@@ -32,7 +32,8 @@
     RegressionTestRunner, \
     RegressionTestStorage, \
     run_main, \
-    clear_registry
+    clear_registry, \
+    registry_entries
 
 from .output_tests import \
     YTStaticOutputTest, \


--- a/yt/utilities/answer_testing/output_tests.py	Wed May 18 09:37:36 2011 -0400
+++ b/yt/utilities/answer_testing/output_tests.py	Wed May 18 11:21:51 2011 -0400
@@ -118,8 +118,8 @@
         """
         if a1.shape != a2.shape:
             raise ShapeMismatch(a1, a2)
-        delta = na.abs(a1 - a2)/(a1 + a2)
-        if delta.max() > acceptable:
+        delta = na.abs(a1 - a2).astype("float64")/(a1 + a2)
+        if na.nanmax(delta) > acceptable:
             raise ArrayDelta(delta, acceptable)
         return True
 
@@ -158,8 +158,11 @@
         self.io_log = io_log
 
     def __iter__(self):
-        for line in open(self.io_log):
-            yield line[len(self.io_log_header):].split()[0].strip()
+        if isinstance(self.io_log, types.StringTypes):
+            for line in open(self.io_log):
+                yield line[len(self.io_log_header):].split()[0].strip()
+        elif isinstance(self.io_log, types.ListType):
+            for line in self.io_log: yield line
 
 def create_test(base, new_name, **attrs):
     """


--- a/yt/utilities/answer_testing/runner.py	Wed May 18 09:37:36 2011 -0400
+++ b/yt/utilities/answer_testing/runner.py	Wed May 18 11:21:51 2011 -0400
@@ -44,6 +44,9 @@
         return "FileNotExistException: %s" % (self.filename)
 
 
+def registry_entries():
+    return test_registry.keys()
+
 class RegressionTestStorage(object):
     def __init__(self, results_id, path = "."):
         self.id = results_id


--- a/yt/utilities/command_line.py	Wed May 18 09:37:36 2011 -0400
+++ b/yt/utilities/command_line.py	Wed May 18 11:21:51 2011 -0400
@@ -270,6 +270,13 @@
     commands.identify(u, repo)
     return u.popbuffer()
 
+def get_yt_version():
+    import pkg_resources
+    yt_provider = pkg_resources.get_provider("yt")
+    path = os.path.dirname(yt_provider.module_path)
+    version = _get_hg_version(path)[:12]
+    return version
+
 class YTCommands(cmdln.Cmdln):
     name="yt"
 


http://bitbucket.org/yt_analysis/yt/changeset/f5d9d75f5c88/
changeset:   r4271:f5d9d75f5c88
branch:      yt
user:        MatthewTurk
date:        2011-05-18 17:47:49
summary:     Updating setup.py to include leaflet during installation
affected #:  1 file (347 bytes)

--- a/setup.py	Wed May 18 11:21:51 2011 -0400
+++ b/setup.py	Wed May 18 11:47:49 2011 -0400
@@ -12,6 +12,9 @@
 DATA_FILES_JS   = glob.glob('yt/gui/reason/html/js/*.js')
 DATA_FILES_PNG  = glob.glob('yt/gui/reason/html/images/*.png') \
                 + glob.glob('yt/gui/reason/html/images/*.ico')
+DATA_FILES_LL   = glob.glob('yt/gui/reason/html/leaflet/*.js') \
+                + glob.glob('yt/gui/reason/html/leaflet/*.css')
+DATA_FILES_LLI  = glob.glob('yt/gui/reason/html/leaflet/images/*.png')
 
 # Verify that we have Cython installed
 try:
@@ -133,7 +136,9 @@
         zip_safe=False,
         data_files = [('yt/gui/reason/html/', DATA_FILES_HTML),
                       ('yt/gui/reason/html/js/', DATA_FILES_JS),
-                      ('yt/gui/reason/html/images/', DATA_FILES_PNG)],
+                      ('yt/gui/reason/html/images/', DATA_FILES_PNG),
+                      ('yt/gui/reason/html/leaflet/', DATA_FILES_LL),
+                      ('yt/gui/reason/html/leaflet/images', DATA_FILES_LLI)],
         )
     return

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this message because you have the notification service enabled and are
a recipient configured for this repository.



More information about the yt-svn mailing list