[Yt-svn] yt: Moving the answer tests into the main yt repository. Adding...

hg at spacepope.org hg at spacepope.org
Sat Sep 11 02:10:38 PDT 2010


hg Repository: yt
details:   yt/rev/5459185dfb49
changeset: 3396:5459185dfb49
user:      Matthew Turk <matthewturk at gmail.com>
date:
Sat Sep 11 02:10:23 2010 -0700
description:
Moving the answer tests into the main yt repository.  Adding a number of
niceties to answer testing: specify datasets on the command line, make
importing work more nicely (and so you can specify tests by relative path now)
and results can be stored anywhere.  Here's an example execution of one of the
answer test modules:

python2.6 -m yt.utilities.answer_testing.runner store \
    -f ~/Research/data/RD0005-mine/RedshiftOutput0005 \
    -p tests/results/ reference tests/answer_tests/hierarchy_consistency.py

diffstat:

 tests/answer_tests/fields_to_test.py        |   9 ++++
 tests/answer_tests/hierarchy_consistency.py |  59 +++++++++++++++++++++++++++++
 tests/answer_tests/object_field_values.py   |  59 +++++++++++++++++++++++++++++
 tests/answer_tests/projections.py           |  16 ++++++++
 yt/utilities/answer_testing/output_tests.py |  17 +++++---
 yt/utilities/answer_testing/runner.py       |  65 ++++++++++++++++++++++++++------
 6 files changed, 206 insertions(+), 19 deletions(-)

diffs (296 lines):

diff -r 07a945f94009 -r 5459185dfb49 tests/answer_tests/fields_to_test.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/answer_tests/fields_to_test.py	Sat Sep 11 02:10:23 2010 -0700
@@ -0,0 +1,9 @@
+# We want to test several things.  We need to be able to run the 
+
+field_list = ["Density", "Temperature", "x-velocity", "y-velocity", "z-velocity",
+                  # Now some derived fields
+                  "Pressure", "SoundSpeed", "particle_density", "Entropy",
+                  # Ghost zones
+                  "AveragedDensity", "DivV"]
+
+particle_field_list = ["particle_position_x", "ParticleMassMsun"]
diff -r 07a945f94009 -r 5459185dfb49 tests/answer_tests/hierarchy_consistency.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/answer_tests/hierarchy_consistency.py	Sat Sep 11 02:10:23 2010 -0700
@@ -0,0 +1,59 @@
+import numpy as na
+
+from yt.utilities.answer_testing.output_tests import \
+    YTStaticOutputTest, RegressionTestException
+from yt.funcs import ensure_list
+
+class HierarchyInconsistent(RegressionTestException):
+    pass
+
+class HierarchyConsistency(YTStaticOutputTest):
+    name = "hierarchy_consistency"
+    def run(self):
+        self.result = \
+            all( g in ensure_list(c.Parent) for g in self.pf.h.grids
+                                            for c in g.Children )
+
+    def compare(self, old_result):
+        if not(old_result and self.result): raise HierarchyInconsistent()
+
+class GridLocationsProperties(YTStaticOutputTest):
+    name = "level_consistency"
+    def run(self):
+        self.result = dict(grid_left_edge = self.pf.h.grid_left_edge,
+                           grid_right_edge = self.pf.h.grid_right_edge,
+                           grid_levels = self.pf.h.grid_levels,
+                           grid_particle_count = self.pf.h.grid_particle_count,
+                           grid_dimensions = self.pf.h.grid_dimensions)
+
+    def compare(self, old_result):
+        # We allow no difference between these values
+        self.compare_data_arrays(self.result, old_result, 0.0)
+
+class GridRelationshipsChanged(RegressionTestException):
+    pass
+
+class GridRelationships(YTStaticOutputTest):
+
+    name = "grid_relationships"
+    def run(self):
+        self.result = [ [p.id for p in ensure_list(g.Parent) if g.Parent is not None]
+                        for g in self.pf.h.grids ]
+
+    def compare(self, old_result):
+        if len(old_result) != len(self.result):
+            raise GridRelationshipsChanged()
+        for plist1, plist2 in zip(old_result, self.result):
+            if len(plist1) != len(plist2): raise GridRelationshipsChanged()
+            if not all( (p1 == p2 for p1, p2 in zip(plist1, plist2) ) ):
+                raise GridRelationshipsChanged()
+
+class GridGlobalIndices(YTStaticOutputTest):
+    name = "global_startindex"
+
+    def run(self):
+        self.result = na.array([g.get_global_startindex()
+                                for g in self.pf.h.grids])
+
+    def compare(self, old_result):
+        self.compare_array_delta(old_result, self.result, 0.0)
diff -r 07a945f94009 -r 5459185dfb49 tests/answer_tests/object_field_values.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/answer_tests/object_field_values.py	Sat Sep 11 02:10:23 2010 -0700
@@ -0,0 +1,59 @@
+import hashlib
+import numpy as na
+
+from yt.utilities.answer_testing.output_tests import \
+    YTStaticOutputTest, RegressionTestException, create_test
+from yt.funcs import ensure_list
+from fields_to_test import field_list, particle_field_list
+
+class FieldHashesDontMatch(RegressionTestException):
+    pass
+
+class YTFieldValuesTest(YTStaticOutputTest):
+    def run(self):
+        vals = self.data_object[self.field].copy()
+        vals.sort()
+        self.result = hashlib.sha256(vals.tostring()).hexdigest()
+
+    def compare(self, old_result):
+        if self.result != old_result: raise FieldHashesDontMatch
+
+class CenteredSphere(YTFieldValuesTest):
+
+    def setup(self):
+        YTFieldValuesTest.setup(self)
+        center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+        width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
+        self.data_object = self.pf.h.sphere(center, width/0.25)
+
+for field in field_list + particle_field_list:
+    create_test(CenteredSphere, "centered_sphere_%s" % (field), field = field)
+
+class OffCenteredSphere(YTFieldValuesTest):
+
+    def setup(self):
+        YTFieldValuesTest.setup(self)
+        center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+        width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
+        self.data_object = self.pf.h.sphere(center - 0.25 * width, width/0.25)
+
+for field in field_list + particle_field_list:
+    create_test(OffCenteredSphere, "off_centered_sphere_%s" % (field), field = field)
+
+class CornerSphere(YTFieldValuesTest):
+
+    def setup(self):
+        YTFieldValuesTest.setup(self)
+        width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
+        self.data_object = self.pf.h.sphere(self.pf.domain_left_edge, width/0.25)
+
+for field in field_list + particle_field_list:
+    create_test(CornerSphere, "corner_sphere_%s" % (field), field = field)
+
+class AllData(YTFieldValuesTest):
+    def setup(self):
+        YTFieldValuesTest.setup(self)
+        self.data_object = self.pf.h.all_data()
+
+for field in field_list + particle_field_list:
+    create_test(AllData, "all_data_%s" % (field), field = field)
diff -r 07a945f94009 -r 5459185dfb49 tests/answer_tests/projections.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/answer_tests/projections.py	Sat Sep 11 02:10:23 2010 -0700
@@ -0,0 +1,16 @@
+from yt.utilities.answer_testing.output_tests import \
+    SingleOutputTest, create_test
+from yt.utilities.answer_testing.hydro_tests import \
+    TestProjection, TestGasDistribution
+from fields_to_test import field_list
+
+for axis in range(3):
+    for field in field_list:
+        create_test(TestProjection, "projection_test_%s_%s" % (axis, field),
+                    field = field, axis = axis)
+
+for field in field_list:
+    create_test(TestGasDistribution, "profile_density_test_%s" % field,
+                field_x = "Density", field_y = field)
+
+
diff -r 07a945f94009 -r 5459185dfb49 yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py	Sat Sep 11 01:12:12 2010 -0700
+++ b/yt/utilities/answer_testing/output_tests.py	Sat Sep 11 02:10:23 2010 -0700
@@ -2,13 +2,16 @@
 
 # We first create our dictionary of tests to run.  This starts out empty, and
 # as tests are imported it will be filled.
-class TestRegistry(dict):
-    def __new__(cls, *p, **k):
-        if not '_the_instance' in cls.__dict__:
-            cls._the_instance = dict.__new__(cls)
-            return cls._the_instance
-    
-test_registry = TestRegistry()
+if "TestRegistry" not in locals():
+    print "Initializing TestRegistry"
+    class TestRegistry(dict):
+        def __new__(cls, *p, **k):
+            if not '_the_instance' in cls.__dict__:
+                cls._the_instance = dict.__new__(cls)
+                return cls._the_instance
+if "test_registry" not in locals():
+    print "Initializing test_registry"
+    test_registry = TestRegistry()
 
 # The exceptions we raise, related to the character of the failure.
 
diff -r 07a945f94009 -r 5459185dfb49 yt/utilities/answer_testing/runner.py
--- a/yt/utilities/answer_testing/runner.py	Sat Sep 11 01:12:12 2010 -0700
+++ b/yt/utilities/answer_testing/runner.py	Sat Sep 11 02:10:23 2010 -0700
@@ -1,5 +1,6 @@
 import matplotlib; matplotlib.use("Agg")
-import os, shelve, cPickle, sys
+import os, shelve, cPickle, sys, imp, tempfile
+
 from yt.config import ytcfg; ytcfg["yt","serialize"] = "False"
 import yt.utilities.cmdln as cmdln
 from .xunit import Xunit
@@ -59,7 +60,6 @@
         test = test_registry[name]
         plot_list = []
         if test.output_type == 'single':
-            print self.io_log
             mot = MultipleOutputTest(self.io_log)
             for i,fn in enumerate(mot):
                 # This next line is to keep the shelve module
@@ -116,6 +116,32 @@
 class EnzoTestRunnerCommands(cmdln.Cmdln):
     name = "enzo_tests"
 
+    def _load_modules(self, test_modules):
+        for fn in test_modules:
+            if fn.endswith(".py"): fn = fn[:-3]
+            print "Loading module %s" % (fn)
+            mname = os.path.basename(fn)
+            f, filename, desc = imp.find_module(mname, [os.path.dirname(fn)])
+            project = imp.load_module(mname, f, filename, desc)
+
+    def _update_io_log(self, opts, kwargs):
+        if len(opts.datasets) == 0: return
+        f = tempfile.NamedTemporaryFile()
+        kwargs['io_log'] = f.name
+        for d in opts.datasets:
+            fn = os.path.expanduser(d)
+            print "Registered dataset %s" % fn
+            f.write("DATASET WRITTEN %s\n" % fn)
+        f.flush()
+        f.seek(0)
+        return f
+
+    @cmdln.option("-f", "--dataset", action="append",
+                  help="override the io_log and add this to the new one",
+                  dest="datasets")
+    @cmdln.option("-p", "--results-path", action="store",
+                  help="which directory should results be stored in",
+                  dest="results_path", default=".")
     def do_store(self, subcmd, opts, name, *test_modules):
         """
         ${cmd_name}: Run and store a new dataset.
@@ -124,19 +150,26 @@
         ${cmd_option_list}
         """
         sys.path.insert(0, ".")
-        for fn in test_modules:
-            if fn.endswith(".py"): fn = fn[:-3]
-            print "Loading module %s" % (fn)
-            __import__(fn)
-        test_runner = RegressionTestRunner(name)
+        self._load_modules(test_modules)
+        kwargs = {}
+        f = self._update_io_log(opts, kwargs)
+        test_runner = RegressionTestRunner(name,
+                results_path = opts.results_path,
+                **kwargs)
         test_runner.run_all_tests()
 
     @cmdln.option("-o", "--output", action="store",
                   help="output results to file",
                   dest="outputfile", default=None)
+    @cmdln.option("-p", "--results-path", action="store",
+                  help="which directory should results be stored in",
+                  dest="results_path", default=".")
     @cmdln.option("-n", "--nose", action="store_true",
                   help="run through nose with xUnit testing",
                   dest="run_nose", default=False)
+    @cmdln.option("-f", "--dataset", action="append",
+                  help="override the io_log and add this to the new one",
+                  dest="datasets")
     def do_compare(self, subcmd, opts, reference, comparison, *test_modules):
         """
         ${cmd_name}: Compare a reference dataset against a new dataset.  The
@@ -145,12 +178,20 @@
         ${cmd_usage}
         ${cmd_option_list}
         """
+        if comparison == "__CURRENT__":
+            import pkg_resources
+            yt_provider = pkg_resources.get_provider("yt")
+            path = os.path.dirname(yt_provider.module_path)
+            from yt.utilities.command_line import _get_hg_version
+            comparison = _get_hg_version(path)[:12]
+            print "Setting comparison to: %s" % (comparison)
         sys.path.insert(0, ".")
-        for fn in test_modules:
-            if fn.endswith(".py"): fn = fn[:-3]
-            print "Loading module %s" % (fn)
-            __import__(fn)
-        test_runner = RegressionTestRunner(comparison, reference)
+        self._load_modules(test_modules)
+        kwargs = {}
+        f = self._update_io_log(opts, kwargs)
+        test_runner = RegressionTestRunner(comparison, reference,
+                            results_path=opts.results_path,
+                            **kwargs)
         if opts.run_nose:
             test_runner.watcher = Xunit()
         results = test_runner.run_all_tests()



More information about the yt-svn mailing list