[yt-svn] commit/yt: 18 new changesets

Bitbucket commits-noreply at bitbucket.org
Tue Oct 23 15:53:36 PDT 2012


18 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/e25156a7dfa0/
changeset:   e25156a7dfa0
branch:      yt
user:        MatthewTurk
date:        2012-10-15 06:37:52
summary:     Adding the initial nose plugin for answer testing.
affected #:  3 files

diff -r c33844a7aa055486cbcf1a7f0176c60bb5e2fc0b -r e25156a7dfa013a4f60cfe3378c03fe0157de326 setup.py
--- a/setup.py
+++ b/setup.py
@@ -154,7 +154,11 @@
             'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
                             'yt = yt.utilities.command_line:run_main',
-                       ]},
+                      ],
+                      'nose.plugins.0.10': [
+                            'answer-testing = yt.utilities.answer_testing.api:AnswerTesting'
+                      ]
+        },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
         url="http://yt-project.org/",

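For context: the 'nose.plugins.0.10' entry-point group registered above is how nose discovers third-party plugins at startup. A minimal sketch of the same hook pattern, with hypothetical names (Demo, --demo-label), assuming nose is installed:

import os
from nose.plugins import Plugin

class Demo(Plugin):
    # "name" determines the enabling flag nose generates: --with-demo
    name = "demo"

    def options(self, parser, env=os.environ):
        # Defer to the base class, then register extra command-line options.
        super(Demo, self).options(parser, env=env)
        parser.add_option("--demo-label", dest="demo_label",
                          default="unnamed", help="Label for this run")

    def configure(self, options, conf):
        super(Demo, self).configure(options, conf)
        if not self.enabled:
            return
        self.label = options.demo_label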

diff -r c33844a7aa055486cbcf1a7f0176c60bb5e2fc0b -r e25156a7dfa013a4f60cfe3378c03fe0157de326 yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -57,3 +57,8 @@
     TestBooleanANDParticleQuantity, \
     TestBooleanORParticleQuantity, \
     TestBooleanNOTParticleQuantity
+
+try:
+    from .framework import AnswerTesting
+except ImportError:
+    raise


diff -r c33844a7aa055486cbcf1a7f0176c60bb5e2fc0b -r e25156a7dfa013a4f60cfe3378c03fe0157de326 yt/utilities/answer_testing/framework.py
--- /dev/null
+++ b/yt/utilities/answer_testing/framework.py
@@ -0,0 +1,65 @@
+"""
+Answer Testing using Nose as a starting point
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import logging
+import os
+
+from yt.testing import *
+from yt.utilities.command_line import get_yt_version
+from yt.config import ytcfg
+from nose.plugins import Plugin
+
+log = logging.getLogger('nose.plugins.answer-testing')
+
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        test_storage_directory = ytcfg.get("yt", "test_storage_dir")
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        parser.add_option("--answer-parameter-file", dest="parameter_file",
+            default=os.path.join(os.getcwd(), "tests/DD0010/moving7_0010"),
+            help="The parameter file value to feed to 'load' to test against")
+        parser.add_option("--answer-output", dest="storage_dir",
+            default=test_storage_directory,
+            help="Base directory for storing test output.")
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=None,
+            help="The name against which we will compare")
+        parser.add_option("--answer-name", dest="this_name",
+            default=my_hash,
+            help="The name we'll call this set of tests")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+
+    def finalize(self, result):
+        pass

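A hedged sketch of exercising the new plugin programmatically; the argv flags mirror the options registered above, and nose also picks the plugin up automatically via the entry point once the package is installed:

import nose
from yt.utilities.answer_testing.api import AnswerTesting

nose.run(argv=["nosetests", "--with-answer-testing",
               "--answer-name=my-run", "yt"],
         addplugins=[AnswerTesting()])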


https://bitbucket.org/yt_analysis/yt/changeset/ad59029089d4/
changeset:   ad59029089d4
branch:      yt
user:        MatthewTurk
date:        2012-10-15 07:11:44
summary:     Further fleshing out of the new answer testing plugin,
including loading tests from storage.
affected #:  1 file

diff -r e25156a7dfa013a4f60cfe3378c03fe0157de326 -r ad59029089d41fd2269d9d6ca5fdeb6d96b55aae yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -25,6 +25,7 @@
 
 import logging
 import os
+import shelve
 
 from yt.testing import *
 from yt.utilities.command_line import get_yt_version
@@ -60,6 +61,32 @@
         super(AnswerTesting, self).configure(options, conf)
         if not self.enabled:
             return
+        AnswerTestingTest.result_storage = shelve.Shelf(
+            os.path.join(options.storage_dir,
+                         options.this_name))
+        if options.compare_name is not None:
+            AnswerTestingTest.reference_storage = shelve.Shelf(
+                os.path.join(options.storage_dir,
+                            options.compare_name))
 
     def finalize(self, result):
         pass
+
+class AnswerTestingTest(object):
+    reference_storage = None
+
+    description = None
+    def __init__(self, name, pf_fn):
+        self.pf = load(pf_fn)
+        self.name = "%s_%s" % (pf, name)
+
+    def __call__(self):
+        if self.reference_storage is not None:
+            ov = self.reference_storage.get(self.name, None)
+        else:
+            ov = None
+        nv = self.run()
+        return self.compare(nv, ov)
+
+    def compare(self, new_result, old_result):
+        pass

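The base class above establishes the pattern the rest of this series builds on: each test object computes a fresh result, stores it, and compares against a stored reference when one exists. A self-contained sketch of that run/store/compare shape, with plain dicts standing in for the shelve-backed stores:

class ComparableTest(object):
    result_storage = {}     # new answers, keyed by test name
    reference_storage = {}  # old answers; empty on a first run

    def __init__(self, name):
        self.name = name

    def run(self):
        # Subclasses compute a real result here.
        return 42

    def __call__(self):
        nv = self.run()
        self.result_storage[self.name] = nv
        ov = self.reference_storage.get(self.name)
        if ov is not None:
            self.compare(nv, ov)

    def compare(self, new_result, old_result):
        assert new_result == old_result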


https://bitbucket.org/yt_analysis/yt/changeset/938a408e6867/
changeset:   938a408e6867
branch:      yt
user:        MatthewTurk
date:        2012-10-16 08:59:45
summary:     Porting over a number of the methods for testing individual data outputs.
Added a first set of Enzo tests.  Still DIY.
affected #:  3 files

diff -r ad59029089d41fd2269d9d6ca5fdeb6d96b55aae -r 938a408e686781812245aa094fbafda19260edcd yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -54,6 +54,7 @@
     pasteboard_repo = '',
     reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
+    data_storage_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',


diff -r ad59029089d41fd2269d9d6ca5fdeb6d96b55aae -r 938a408e686781812245aa094fbafda19260edcd yt/frontends/enzo/tests/test_moving7.py
--- /dev/null
+++ b/yt/frontends/enzo/tests/test_moving7.py
@@ -0,0 +1,34 @@
+"""
+Enzo frontend tests using moving7
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import assert_fields
+from yt.frontends.enzo.api import EnzoStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude")
+
+def test_moving7():
+    for k in assert_fields("DD0010/moving7_0010", _fields):
+        yield k


diff -r ad59029089d41fd2269d9d6ca5fdeb6d96b55aae -r 938a408e686781812245aa094fbafda19260edcd yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -26,11 +26,14 @@
 import logging
 import os
 import shelve
+import hashlib
+import contextlib
 
 from yt.testing import *
 from yt.utilities.command_line import get_yt_version
 from yt.config import ytcfg
 from nose.plugins import Plugin
+from yt.mods import *
 
 log = logging.getLogger('nose.plugins.answer-testing')
 
@@ -61,32 +64,176 @@
         super(AnswerTesting, self).configure(options, conf)
         if not self.enabled:
             return
-        AnswerTestingTest.result_storage = shelve.Shelf(
+        AnswerTestingTest.result_storage = shelve.open(
             os.path.join(options.storage_dir,
                          options.this_name))
         if options.compare_name is not None:
-            AnswerTestingTest.reference_storage = shelve.Shelf(
+            AnswerTestingTest.reference_storage = shelve.open(
                 os.path.join(options.storage_dir,
                             options.compare_name))
 
     def finalize(self, result):
         pass
 
+ at contextlib.contextmanager
+def temp_cwd(cwd):
+    oldcwd = os.getcwd()
+    os.chdir(cwd)
+    yield
+    os.chdir(oldcwd)
+
 class AnswerTestingTest(object):
     reference_storage = None
 
     description = None
     def __init__(self, name, pf_fn):
-        self.pf = load(pf_fn)
-        self.name = "%s_%s" % (pf, name)
+        path = ytcfg.get("yt", "data_storage_dir")
+        with temp_cwd(path):
+            self.pf = load(pf_fn)
+            self.pf.h
+        self.name = "%s_%s" % (self.pf, name)
 
     def __call__(self):
+        nv = self.run()
+        self.result_storage[self.name] = nv
         if self.reference_storage is not None:
             ov = self.reference_storage.get(self.name, None)
+            return self.compare(nv, ov)
         else:
             ov = None
-        nv = self.run()
-        return self.compare(nv, ov)
+            return True
 
     def compare(self, new_result, old_result):
-        pass
+        raise RuntimeError
+
+    def create_obj(self, pf, obj_type):
+        # obj_type should be tuple of
+        #  ( obj_name, ( args ) )
+        if obj_type is None:
+            return pf.h.all_data()
+        cls = getattr(pf.h, obj_type[0])
+        obj = cls(*obj_type[1])
+        return obj
+
+    @property
+    def sim_center(self):
+        """
+        This returns the center of the domain.
+        """
+        return 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+
+    @property
+    def max_dens_location(self):
+        """
+        This is a helper function to return the location of the most dense
+        point.
+        """
+        return self.pf.h.find_max("Density")[1]
+
+    @property
+    def entire_simulation(self):
+        """
+        Return an unsorted array of values that cover the entire domain.
+        """
+        return self.pf.h.all_data()
+        
+class FieldValuesTest(AnswerTestingTest):
+
+    def __init__(self, name, pf_fn, field, obj_type = None):
+        super(FieldValuesTest, self).__init__(name, pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        return obj[self.field].sort()
+
+    def compare(self, new_result, old_result):
+        assert_equal(new_result, old_result)
+
+def _try_load(pf_fn):
+    try:
+        load(pf_fn)
+    except:
+        return False
+    return True
+
+def assert_fields(pf_fn, fields, data_obj = None):
+    if AnswerTestingTest.result_storage is None: return 
+    for field in fields:
+        yield FieldValuesTest("FieldValues_%s" % field, pf_fn,
+                              field, data_obj)
+
+class ProjectionValuesTest(AnswerTestingTest):
+    def __init__(self, name, pf_fn, axis, field,
+                 weight_field = None, data_source = None):
+        super(ProjectionValuesTest, self).__init__(name, pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.data_source = None
+
+    def run(self):
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field)
+        return proj
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result.field_data) == len(old_result.field_data))
+        for k in new_result.field_data:
+            assert (k in old_result.field_data)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class GridValuesTest(AnswerTestingTest):
+    def __init__(self, name, pf_fn, field):
+        super(GridValuesTest, self).__init__(name, pf_fn)
+
+    def run(self):
+        hashes = {}
+        for g in self.pf.h.grids:
+            hashes[g.id] = hashlib.md5(g[self.field].tostring()).hexdigest()
+            g.clear_data()
+        return hashes
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class TestGridHierarchy(AnswerTestingTest):
+    def run(self):
+        result = {}
+        result["grid_dimensions"] = self.pf.h.grid_dimensions
+        result["grid_left_edges"] = self.pf.h.grid_left_edge
+        result["grid_right_edges"] = self.pf.h.grid_right_edge
+        result["grid_levels"] = self.pf.h.grid_levels
+        result["grid_particle_count"] = self.pf.h.grid_particle_count
+
+    def compare(self, new_result, old_result):
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class TestParentageRelationships(AnswerTestingTest):
+    def run(self):
+        result = {}
+        result["parents"] = []
+        result["children"] = []
+        for g in self.pf.h.grids:
+            p = g.Parent
+            if p is None:
+                result["parents"].append(None)
+            elif hasattr(p, "id"):
+                result["parents"].append(p.id)
+            else:
+                result["parents"].append([pg.id for pg in p])
+            result["children"].append([c.id for c in g.Children])
+        return result
+
+    def compare(self, new_result, old_result):
+        for newp, oldp in zip(new_result["parents"], old_result["parents"]):
+            assert(newp == oldp)
+        for newc, oldc in zip(new_result["children"], old_result["children"]):
+            assert(newc == oldc)


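The temp_cwd context manager added above temporarily switches into the data directory so relative parameter-file paths resolve; a usage sketch ("/tmp" is only an illustrative path). Note that without a try/finally, the old directory is not restored if the body raises:

import os
from yt.utilities.answer_testing.framework import temp_cwd

with temp_cwd("/tmp"):
    print os.getcwd()   # "/tmp" while inside the block
print os.getcwd()       # original directory restored on normal exit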

https://bitbucket.org/yt_analysis/yt/changeset/bac81d5a63a2/
changeset:   bac81d5a63a2
branch:      yt
user:        MatthewTurk
date:        2012-10-16 09:17:20
summary:     Answer testing preliminarily works for a particular Enzo output.
affected #:  2 files

diff -r 938a408e686781812245aa094fbafda19260edcd -r bac81d5a63a2e7d3f797d1bb7d769dc7fec52443 yt/frontends/enzo/tests/test_moving7.py
--- a/yt/frontends/enzo/tests/test_moving7.py
+++ b/yt/frontends/enzo/tests/test_moving7.py
@@ -24,11 +24,24 @@
 """
 
 from yt.testing import *
-from yt.utilities.answer_testing.framework import assert_fields
+from yt.utilities.answer_testing.framework import \
+    can_run_pf, ProjectionValuesTest, FieldValuesTest
 from yt.frontends.enzo.api import EnzoStaticOutput
 
-_fields = ("Temperature", "Density", "VelocityMagnitude")
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
+           "particle_density")
+
+pf_fn = "DD0010/moving7_0010"
 
 def test_moving7():
-    for k in assert_fields("DD0010/moving7_0010", _fields):
-        yield k
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    for field in _fields:
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)


diff -r 938a408e686781812245aa094fbafda19260edcd -r bac81d5a63a2e7d3f797d1bb7d769dc7fec52443 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -32,6 +32,8 @@
 from yt.testing import *
 from yt.utilities.command_line import get_yt_version
 from yt.config import ytcfg
+from yt.utilities.logger import \
+    disable_stream_logging
 from nose.plugins import Plugin
 from yt.mods import *
 
@@ -64,6 +66,9 @@
         super(AnswerTesting, self).configure(options, conf)
         if not self.enabled:
             return
+        disable_stream_logging()
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = shelve.open(
             os.path.join(options.storage_dir,
                          options.this_name))
@@ -139,34 +144,32 @@
         
 class FieldValuesTest(AnswerTestingTest):
 
-    def __init__(self, name, pf_fn, field, obj_type = None):
+    def __init__(self, pf_fn, field, obj_type = None):
+        name = "%s_%s" % (pf_fn, field)
         super(FieldValuesTest, self).__init__(name, pf_fn)
         self.obj_type = obj_type
         self.field = field
 
     def run(self):
         obj = self.create_obj(self.pf, self.obj_type)
-        return obj[self.field].sort()
+        return np.sort(obj[self.field])
 
     def compare(self, new_result, old_result):
         assert_equal(new_result, old_result)
 
-def _try_load(pf_fn):
-    try:
-        load(pf_fn)
-    except:
-        return False
-    return True
-
-def assert_fields(pf_fn, fields, data_obj = None):
-    if AnswerTestingTest.result_storage is None: return 
-    for field in fields:
-        yield FieldValuesTest("FieldValues_%s" % field, pf_fn,
-                              field, data_obj)
+def can_run_pf(pf_fn):
+    path = ytcfg.get("yt", "data_storage_dir")
+    with temp_cwd(path):
+        try:
+            load(pf_fn)
+        except:
+            return False
+    return AnswerTestingTest.result_storage is not None
 
 class ProjectionValuesTest(AnswerTestingTest):
-    def __init__(self, name, pf_fn, axis, field,
+    def __init__(self, pf_fn, axis, field,
                  weight_field = None, data_source = None):
+        name = "%s_%s_%s_%s" % (pf_fn, axis, field, weight_field)
         super(ProjectionValuesTest, self).__init__(name, pf_fn)
         self.axis = axis
         self.field = field


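test_moving7 leans on nose's generator-test protocol: a test function may yield callables (here, the AnswerTestingTest instances, which are callable), and nose runs each yielded item as a separate test case. The same protocol in miniature:

def test_squares():
    def check(n):
        assert n * n >= n
    for n in (1, 2, 3):
        # Each yielded (callable, args) pair becomes its own test case.
        yield check, n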

https://bitbucket.org/yt_analysis/yt/changeset/d734e554f99e/
changeset:   d734e554f99e
branch:      yt
user:        MatthewTurk
date:        2012-10-18 00:10:16
summary:     Storing results in S3 now seems to work.
affected #:  2 files

diff -r bac81d5a63a2e7d3f797d1bb7d769dc7fec52443 -r d734e554f99ed498076525aea7204ea1994acb9a yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -54,7 +54,7 @@
     pasteboard_repo = '',
     reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
-    data_storage_dir = '/does/not/exist',
+    test_data_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',


diff -r bac81d5a63a2e7d3f797d1bb7d769dc7fec52443 -r d734e554f99ed498076525aea7204ea1994acb9a yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -25,9 +25,9 @@
 
 import logging
 import os
-import shelve
 import hashlib
 import contextlib
+import urllib2
 
 from yt.testing import *
 from yt.utilities.command_line import get_yt_version
@@ -36,31 +36,29 @@
     disable_stream_logging
 from nose.plugins import Plugin
 from yt.mods import *
+import cPickle
 
-log = logging.getLogger('nose.plugins.answer-testing')
+mylog = logging.getLogger('nose.plugins.answer-testing')
+
+_latest = "SomeValue"
+_url_path = "http://yt_answer_tests.s3-website-us-east-1.amazonaws.com/%s"
 
 class AnswerTesting(Plugin):
     name = "answer-testing"
 
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
-        test_storage_directory = ytcfg.get("yt", "test_storage_dir")
         try:
             my_hash = get_yt_version()
         except:
             my_hash = "UNKNOWN%s" % (time.time())
-        parser.add_option("--answer-parameter-file", dest="parameter_file",
-            default=os.path.join(os.getcwd(), "tests/DD0010/moving7_0010"),
-            help="The parameter file value to feed to 'load' to test against")
-        parser.add_option("--answer-output", dest="storage_dir",
-            default=test_storage_directory,
-            help="Base directory for storing test output.")
         parser.add_option("--answer-compare", dest="compare_name",
-            default=None,
-            help="The name against which we will compare")
+            default=None, help="The name against which we will compare")
         parser.add_option("--answer-name", dest="this_name",
             default=my_hash,
             help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
 
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
@@ -69,16 +67,31 @@
         disable_stream_logging()
         from yt.config import ytcfg
         ytcfg["yt","__withintesting"] = "True"
-        AnswerTestingTest.result_storage = shelve.open(
-            os.path.join(options.storage_dir,
-                         options.this_name))
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
         if options.compare_name is not None:
-            AnswerTestingTest.reference_storage = shelve.open(
-                os.path.join(options.storage_dir,
-                            options.compare_name))
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            #AnswerTestingTest.reference_storage = urllib2
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
 
     def finalize(self, result):
-        pass
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt_answer_tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
 
 @contextlib.contextmanager
 def temp_cwd(cwd):
@@ -92,7 +105,7 @@
 
     description = None
     def __init__(self, name, pf_fn):
-        path = ytcfg.get("yt", "data_storage_dir")
+        path = ytcfg.get("yt", "test_data_dir")
         with temp_cwd(path):
             self.pf = load(pf_fn)
             self.pf.h
@@ -100,7 +113,7 @@
 
     def __call__(self):
         nv = self.run()
-        self.result_storage[self.name] = nv
+        self.result_storage[str(self.pf)][self.name] = nv
         if self.reference_storage is not None:
             ov = self.reference_storage.get(self.name, None)
             return self.compare(nv, ov)
@@ -158,7 +171,7 @@
         assert_equal(new_result, old_result)
 
 def can_run_pf(pf_fn):
-    path = ytcfg.get("yt", "data_storage_dir")
+    path = ytcfg.get("yt", "test_data_dir")
     with temp_cwd(path):
         try:
             load(pf_fn)


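finalize() above pickles each parameter file's result dictionary and pushes it to S3 with boto. A condensed sketch of the same upload, assuming AWS credentials are available in the environment for boto.connect_s3():

import cPickle
import boto
from boto.s3.key import Key

def store_answers(answer_name, pf_name, results):
    conn = boto.connect_s3()
    bucket = conn.get_bucket("yt_answer_tests")
    k = Key(bucket)
    k.key = "%s_%s" % (answer_name, pf_name)
    # Pickle the per-parameter-file results and upload them as one object.
    k.set_contents_from_string(cPickle.dumps(results))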

https://bitbucket.org/yt_analysis/yt/changeset/af435ca59ecc/
changeset:   af435ca59ecc
branch:      yt
user:        MatthewTurk
date:        2012-10-18 03:59:12
summary:     Answer testing can now compare against reference values.
affected #:  2 files

diff -r d734e554f99ed498076525aea7204ea1994acb9a -r af435ca59ecccd46efe580b37a9d805ab32177e8 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -41,7 +41,27 @@
 mylog = logging.getLogger('nose.plugins.answer-testing')
 
 _latest = "SomeValue"
-_url_path = "http://yt_answer_tests.s3-website-us-east-1.amazonaws.com/%s"
+_url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
+
+class AnswerTestOpener(object):
+    def __init__(self, reference_name):
+        self.reference_name = reference_name
+        self.cache = {}
+
+    def get(self, pf_name, default = None):
+        if pf_name in self.cache: return self.cache[pf_name]
+        url = _url_path % (self.reference_name, pf_name)
+        try:
+            resp = urllib2.urlopen(url)
+            # This is dangerous, but we have a controlled S3 environment
+            data = resp.read()
+            rv = cPickle.loads(data)
+        except urllib2.HTTPError as ex:
+            mylog.warning("Missing %s (%s)", url, ex)
+            raise YTNoOldAnswer(url)
+        self.cache[pf_name] = rv
+        return rv
 
 class AnswerTesting(Plugin):
     name = "answer-testing"
@@ -73,7 +93,8 @@
             # Now we grab from our S3 store
             if options.compare_name == "latest":
                 options.compare_name = _latest
-            #AnswerTestingTest.reference_storage = urllib2
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
         self.answer_name = options.this_name
         self.store_results = options.store_results
 
@@ -84,7 +105,7 @@
         import boto
         from boto.s3.key import Key
         c = boto.connect_s3()
-        bucket = c.get_bucket("yt_answer_tests")
+        bucket = c.get_bucket("yt-answer-tests")
         for pf_name in self.result_storage:
             rs = cPickle.dumps(self.result_storage[pf_name])
             tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
@@ -92,6 +113,7 @@
             k = Key(bucket)
             k.key = "%s_%s" % (self.answer_name, pf_name)
             k.set_contents_from_string(rs)
+            k.set_acl("public-read")
 
 @contextlib.contextmanager
 def temp_cwd(cwd):
@@ -100,22 +122,32 @@
     yield
     os.chdir(oldcwd)
 
+def can_run_pf(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        try:
+            load(pf_fn)
+        except:
+            return False
+    return AnswerTestingTest.result_storage is not None
+
 class AnswerTestingTest(object):
     reference_storage = None
 
     description = None
-    def __init__(self, name, pf_fn):
+    def __init__(self, pf_fn):
         path = ytcfg.get("yt", "test_data_dir")
         with temp_cwd(path):
             self.pf = load(pf_fn)
             self.pf.h
-        self.name = "%s_%s" % (self.pf, name)
 
     def __call__(self):
         nv = self.run()
         self.result_storage[str(self.pf)][self.name] = nv
         if self.reference_storage is not None:
-            ov = self.reference_storage.get(self.name, None)
+            dd = self.reference_storage.get(str(self.pf))
+            if dd is None: raise YTNoOldAnswer(str(self.pf))
+            ov = dd[self.name]
             return self.compare(nv, ov)
         else:
             ov = None
@@ -154,56 +186,69 @@
         Return an unsorted array of values that cover the entire domain.
         """
         return self.pf.h.all_data()
+
+    @property
+    def name(self):
+        obj_type = getattr(self, "obj_type", None)
+        if obj_type is None:
+            oname = "all"
+        else:
+            oname = "_".join((str(s) for s in obj_type))
+        args = [self._type_name, str(self.pf), oname]
+        args += [str(getattr(self, an)) for an in self._attrs]
+        return "_".join(args)
         
 class FieldValuesTest(AnswerTestingTest):
+    _type_name = "FieldValues"
+    _attrs = ("field", )
 
     def __init__(self, pf_fn, field, obj_type = None):
-        name = "%s_%s" % (pf_fn, field)
-        super(FieldValuesTest, self).__init__(name, pf_fn)
+        super(FieldValuesTest, self).__init__(pf_fn)
         self.obj_type = obj_type
         self.field = field
 
     def run(self):
         obj = self.create_obj(self.pf, self.obj_type)
-        return np.sort(obj[self.field])
+        return obj[self.field]
 
     def compare(self, new_result, old_result):
         assert_equal(new_result, old_result)
 
-def can_run_pf(pf_fn):
-    path = ytcfg.get("yt", "test_data_dir")
-    with temp_cwd(path):
-        try:
-            load(pf_fn)
-        except:
-            return False
-    return AnswerTestingTest.result_storage is not None
+class ProjectionValuesTest(AnswerTestingTest):
+    _type_name = "ProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
 
-class ProjectionValuesTest(AnswerTestingTest):
-    def __init__(self, pf_fn, axis, field,
-                 weight_field = None, data_source = None):
-        name = "%s_%s_%s_%s" % (pf_fn, axis, field, weight_field)
-        super(ProjectionValuesTest, self).__init__(name, pf_fn)
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(ProjectionValuesTest, self).__init__(pf_fn)
         self.axis = axis
         self.field = field
         self.weight_field = weight_field
-        self.data_source = None
+        self.obj_type = obj_type
 
     def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
         proj = self.pf.h.proj(self.axis, self.field,
-                              weight_field=self.weight_field)
-        return proj
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        return proj.field_data
 
     def compare(self, new_result, old_result):
-        assert(len(new_result.field_data) == len(old_result.field_data))
-        for k in new_result.field_data:
-            assert (k in old_result.field_data)
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
 class GridValuesTest(AnswerTestingTest):
+    _type_name = "GridValues"
+    _attrs = ("field",)
+
     def __init__(self, name, pf_fn, field):
-        super(GridValuesTest, self).__init__(name, pf_fn)
+        super(GridValuesTest, self).__init__(pf_fn)
 
     def run(self):
         hashes = {}
@@ -220,6 +265,9 @@
             assert_equal(new_result[k], old_result[k])
 
 class TestGridHierarchy(AnswerTestingTest):
+    _type_name = "GridHierarchy"
+    _attrs = ()
+
     def run(self):
         result = {}
         result["grid_dimensions"] = self.pf.h.grid_dimensions
@@ -233,6 +281,8 @@
             assert_equal(new_result[k], old_result[k])
 
 class TestParentageRelationships(AnswerTestingTest):
+    _type_name = "ParentageRelationships"
+    _attrs = ()
     def run(self):
         result = {}
         result["parents"] = []


diff -r d734e554f99ed498076525aea7204ea1994acb9a -r af435ca59ecccd46efe580b37a9d805ab32177e8 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -146,3 +146,11 @@
     def __str__(self):
         return "You must create an API key before uploading.  See " + \
                "https://data.yt-project.org/getting_started.html"
+
+class YTNoOldAnswer(YTException):
+    def __init__(self, path):
+        self.path = path
+
+    def __str__(self):
+        return "There is no old answer available.\n" + \
+               str(self.path)


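AnswerTestOpener fetches one pickled answer set per parameter file over plain HTTP and memoizes it. The same fetch-and-cache pattern in a standalone sketch (the URL template matches the diff; "gold001" below is only an illustrative reference name):

import urllib2
import cPickle

_url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
_cache = {}

def get_answers(reference_name, pf_name):
    key = (reference_name, pf_name)
    if key not in _cache:
        resp = urllib2.urlopen(_url_path % (reference_name, pf_name))
        # Unpickling remote data is only safe against a controlled store.
        _cache[key] = cPickle.loads(resp.read())
    return _cache[key]

# e.g. get_answers("gold001", "moving7_0010")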

https://bitbucket.org/yt_analysis/yt/changeset/8e2e8fe6ac49/
changeset:   8e2e8fe6ac49
branch:      yt
user:        MatthewTurk
date:        2012-10-18 04:39:24
summary:     Fixing up tests of grid hierarchy, etc., for moving7.  Related to YT-2.
affected #:  2 files

diff -r af435ca59ecccd46efe580b37a9d805ab32177e8 -r 8e2e8fe6ac49dea863dc4f40dbd1f49a066a9af3 yt/frontends/enzo/tests/test_moving7.py
--- a/yt/frontends/enzo/tests/test_moving7.py
+++ b/yt/frontends/enzo/tests/test_moving7.py
@@ -25,7 +25,9 @@
 
 from yt.testing import *
 from yt.utilities.answer_testing.framework import \
-    can_run_pf, ProjectionValuesTest, FieldValuesTest
+    can_run_pf, ProjectionValuesTest, FieldValuesTest, \
+    GridHierarchyTest, ParentageRelationshipsTest, \
+    GridValuesTest
 from yt.frontends.enzo.api import EnzoStaticOutput
 
 _fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
@@ -36,7 +38,10 @@
 def test_moving7():
     if not can_run_pf(pf_fn): return
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
     for field in _fields:
+        yield GridValuesTest(pf_fn, field)
         for axis in [0, 1, 2]:
             for ds in dso:
                 for weight_field in [None, "Density"]:


diff -r af435ca59ecccd46efe580b37a9d805ab32177e8 -r 8e2e8fe6ac49dea863dc4f40dbd1f49a066a9af3 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -247,8 +247,9 @@
     _type_name = "GridValues"
     _attrs = ("field",)
 
-    def __init__(self, name, pf_fn, field):
+    def __init__(self, pf_fn, field):
         super(GridValuesTest, self).__init__(pf_fn)
+        self.field = field
 
     def run(self):
         hashes = {}
@@ -264,7 +265,7 @@
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
-class TestGridHierarchy(AnswerTestingTest):
+class GridHierarchyTest(AnswerTestingTest):
     _type_name = "GridHierarchy"
     _attrs = ()
 
@@ -280,7 +281,7 @@
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
-class TestParentageRelationships(AnswerTestingTest):
+class ParentageRelationshipsTest(AnswerTestingTest):
     _type_name = "ParentageRelationships"
     _attrs = ()
     def run(self):


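With this changeset each test class declares _type_name and _attrs, and the base class's name property joins them into a stable, human-readable identifier for keying stored answers. An illustrative sketch of that assembly (the values here are made up):

class FakeTest(object):
    _type_name = "ProjectionValues"
    _attrs = ("field", "axis", "weight_field")
    field, axis, weight_field = "Density", 0, None
    pf, obj_type = "moving7_0010", None

    @property
    def name(self):
        oname = "all" if self.obj_type is None else \
            "_".join(str(s) for s in self.obj_type)
        args = [self._type_name, str(self.pf), oname]
        args += [str(getattr(self, an)) for an in self._attrs]
        return "_".join(args)

print FakeTest().name  # ProjectionValues_moving7_0010_all_Density_0_None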

https://bitbucket.org/yt_analysis/yt/changeset/137aedf82490/
changeset:   137aedf82490
branch:      yt
user:        MatthewTurk
date:        2012-10-18 04:47:58
summary:     Adding a "standard_patch_amr" test suite function
affected #:  3 files

diff -r 8e2e8fe6ac49dea863dc4f40dbd1f49a066a9af3 -r 137aedf8249008f988b572d517493b89fb0d3951 yt/frontends/enzo/tests/test_moving7.py
--- a/yt/frontends/enzo/tests/test_moving7.py
+++ b/yt/frontends/enzo/tests/test_moving7.py
@@ -25,9 +25,8 @@
 
 from yt.testing import *
 from yt.utilities.answer_testing.framework import \
-    can_run_pf, ProjectionValuesTest, FieldValuesTest, \
-    GridHierarchyTest, ParentageRelationshipsTest, \
-    GridValuesTest
+    requires_pf, \
+    standard_patch_amr
 from yt.frontends.enzo.api import EnzoStaticOutput
 
 _fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
@@ -35,18 +34,7 @@
 
 pf_fn = "DD0010/moving7_0010"
 
+ at requires_pf(pf_fn)
 def test_moving7():
-    if not can_run_pf(pf_fn): return
-    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
-    yield GridHierarchyTest(pf_fn)
-    yield ParentageRelationshipsTest(pf_fn)
-    for field in _fields:
-        yield GridValuesTest(pf_fn, field)
-        for axis in [0, 1, 2]:
-            for ds in dso:
-                for weight_field in [None, "Density"]:
-                    yield ProjectionValuesTest(
-                        pf_fn, axis, field, weight_field,
-                        ds)
-                yield FieldValuesTest(
-                        pf_fn, field, ds)
+    for test in standard_patch_amr(pf_fn, _fields):
+        yield test


diff -r 8e2e8fe6ac49dea863dc4f40dbd1f49a066a9af3 -r 137aedf8249008f988b572d517493b89fb0d3951 yt/utilities/answer_testing/default_tests.py
--- a/yt/utilities/answer_testing/default_tests.py
+++ b/yt/utilities/answer_testing/default_tests.py
@@ -67,3 +67,4 @@
         for field in sorted(self.result):
             for p1, p2 in zip(self.result[field], old_result[field]):
                 self.compare_data_arrays(p1, p2, self.tolerance)
+


diff -r 8e2e8fe6ac49dea863dc4f40dbd1f49a066a9af3 -r 137aedf8249008f988b572d517493b89fb0d3951 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -304,3 +304,27 @@
             assert(newp == oldp)
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newc == oldc)
+
+def requires_pf(pf_fn):
+    def ffalse(func):
+        return lambda: None
+    if not can_run_pf(pf_fn):
+        return ffalse
+    def ftrue(func):
+        return func
+    return ftrue
+
+def standard_patch_amr(pf_fn, fields):
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)


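requires_pf is the conditional-decorator pattern: when the dataset cannot be loaded, the test function is swapped for a no-op so nose quietly collects nothing. The same pattern, sketched with a plain file-existence check in place of can_run_pf:

import os

def requires_file(path):
    def ffalse(func):
        return lambda: None   # data missing: replace the test with a no-op
    def ftrue(func):
        return func           # data present: leave the test untouched
    return ftrue if os.path.exists(path) else ffalse

@requires_file("DD0010/moving7_0010")
def test_something():
    assert True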

https://bitbucket.org/yt_analysis/yt/changeset/6fb61726fbb9/
changeset:   6fb61726fbb9
branch:      yt
user:        MatthewTurk
date:        2012-10-18 05:01:26
summary:     Cleaning up a bit with data_dir_load and removing a few print statements.
Updating nosetests setup.cfg entry to turn on answer testing by default.
affected #:  5 files

diff -r 137aedf8249008f988b572d517493b89fb0d3951 -r 6fb61726fbb9ac5448e9df6e40cf06c530869ae9 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -7,3 +7,5 @@
 where=yt
 exclude=answer_testing
 with-xunit=1
+with-answer-testing=1
+answer-compare=gold001


diff -r 137aedf8249008f988b572d517493b89fb0d3951 -r 6fb61726fbb9ac5448e9df6e40cf06c530869ae9 yt/frontends/enzo/tests/test_moving7.py
--- a/yt/frontends/enzo/tests/test_moving7.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""
-Enzo frontend tests using moving7
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from yt.testing import *
-from yt.utilities.answer_testing.framework import \
-    requires_pf, \
-    standard_patch_amr
-from yt.frontends.enzo.api import EnzoStaticOutput
-
-_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
-           "particle_density")
-
-pf_fn = "DD0010/moving7_0010"
-
- at requires_pf(pf_fn)
-def test_moving7():
-    for test in standard_patch_amr(pf_fn, _fields):
-        yield test


diff -r 137aedf8249008f988b572d517493b89fb0d3951 -r 6fb61726fbb9ac5448e9df6e40cf06c530869ae9 yt/frontends/enzo/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -0,0 +1,50 @@
+"""
+Enzo frontend tests
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    standard_patch_amr, \
+    data_dir_load
+from yt.frontends.enzo.api import EnzoStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
+           "particle_density")
+
+m7 = "DD0010/moving7_0010"
+ at requires_pf(m7)
+def test_moving7():
+    pf = data_dir_load(m7)
+    yield assert_equal, str(pf), "moving7_0010"
+    for test in standard_patch_amr(m7, _fields):
+        yield test
+
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+ at requires_pf(g30)
+def test_galaxy0030():
+    pf = data_dir_load(g30)
+    yield assert_equal, str(pf), "galaxy0030"
+    for test in standard_patch_amr(g30, _fields):
+        yield test


diff -r 137aedf8249008f988b572d517493b89fb0d3951 -r 6fb61726fbb9ac5448e9df6e40cf06c530869ae9 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -131,27 +131,30 @@
             return False
     return AnswerTestingTest.result_storage is not None
 
+def data_dir_load(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        pf = load(pf_fn)
+        pf.h
+        return pf
+
 class AnswerTestingTest(object):
     reference_storage = None
 
     description = None
     def __init__(self, pf_fn):
-        path = ytcfg.get("yt", "test_data_dir")
-        with temp_cwd(path):
-            self.pf = load(pf_fn)
-            self.pf.h
+        self.pf = data_dir_load(pf_fn)
 
     def __call__(self):
         nv = self.run()
-        self.result_storage[str(self.pf)][self.name] = nv
         if self.reference_storage is not None:
             dd = self.reference_storage.get(str(self.pf))
             if dd is None: raise YTNoOldAnswer(str(self.pf))
             ov = dd[self.name]
-            return self.compare(nv, ov)
+            self.compare(nv, ov)
         else:
             ov = None
-            return True
+        self.result_storage[str(self.pf)][self.name] = nv
 
     def compare(self, new_result, old_result):
         raise RuntimeError
@@ -308,13 +311,15 @@
 def requires_pf(pf_fn):
     def ffalse(func):
         return lambda: None
+    def ftrue(func):
+        return func
     if not can_run_pf(pf_fn):
         return ffalse
-    def ftrue(func):
-        return func
-    return ftrue
+    else:
+        return ftrue
 
 def standard_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
     yield GridHierarchyTest(pf_fn)
     yield ParentageRelationshipsTest(pf_fn)


diff -r 137aedf8249008f988b572d517493b89fb0d3951 -r 6fb61726fbb9ac5448e9df6e40cf06c530869ae9 yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -29,14 +29,12 @@
 # We first create our dictionary of tests to run.  This starts out empty, and
 # as tests are imported it will be filled.
 if "TestRegistry" not in locals():
-    print "Initializing TestRegistry"
     class TestRegistry(dict):
         def __new__(cls, *p, **k):
             if not '_the_instance' in cls.__dict__:
                 cls._the_instance = dict.__new__(cls)
                 return cls._the_instance
 if "test_registry" not in locals():
-    print "Initializing test_registry"
     test_registry = TestRegistry()
 
 # The exceptions we raise, related to the character of the failure.


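data_dir_load centralizes the chdir-into-test_data_dir / load / instantiate-the-hierarchy dance that was previously inlined. A usage sketch, assuming test_data_dir points at a directory containing the sample output:

from yt.utilities.answer_testing.framework import data_dir_load

# The path is resolved relative to the "test_data_dir" config value and
# the hierarchy (pf.h) is built before the object is handed back.
pf = data_dir_load("DD0010/moving7_0010")
print str(pf)  # "moving7_0010"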

https://bitbucket.org/yt_analysis/yt/changeset/41d3068adff9/
changeset:   41d3068adff9
branch:      yt
user:        MatthewTurk
date:        2012-10-18 06:33:18
summary:     Disabling galaxy0030 for now, and fixing the grid hierarchy consistency test.
affected #:  2 files

diff -r 6fb61726fbb9ac5448e9df6e40cf06c530869ae9 -r 41d3068adff918404b0d86a2ab0f258bfdd318ae yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -44,6 +44,7 @@
 g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
 @requires_pf(g30)
 def test_galaxy0030():
+    return
     pf = data_dir_load(g30)
     yield assert_equal, str(pf), "galaxy0030"
     for test in standard_patch_amr(g30, _fields):


diff -r 6fb61726fbb9ac5448e9df6e40cf06c530869ae9 -r 41d3068adff918404b0d86a2ab0f258bfdd318ae yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -279,6 +279,7 @@
         result["grid_right_edges"] = self.pf.h.grid_right_edge
         result["grid_levels"] = self.pf.h.grid_levels
         result["grid_particle_count"] = self.pf.h.grid_particle_count
+        return result
 
     def compare(self, new_result, old_result):
         for k in new_result:



https://bitbucket.org/yt_analysis/yt/changeset/10ba9aa8f388/
changeset:   10ba9aa8f388
branch:      yt
user:        MatthewTurk
date:        2012-10-22 22:46:56
summary:     Splitting answer test categories into bigdata/smalldata tests.  Removing the
line in setup.cfg about comparing to gold001, though I think that should come
back.  Ensuring that nose gets the test names by updating the description
attribute for tests.
affected #:  3 files

diff -r 41d3068adff918404b0d86a2ab0f258bfdd318ae -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -8,4 +8,4 @@
 exclude=answer_testing
 with-xunit=1
 with-answer-testing=1
-answer-compare=gold001
+#answer-compare=gold001


diff -r 41d3068adff918404b0d86a2ab0f258bfdd318ae -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -26,7 +26,8 @@
 from yt.testing import *
 from yt.utilities.answer_testing.framework import \
     requires_pf, \
-    standard_patch_amr, \
+    small_patch_amr, \
+    big_patch_amr, \
     data_dir_load
 from yt.frontends.enzo.api import EnzoStaticOutput
 
@@ -38,14 +39,13 @@
 def test_moving7():
     pf = data_dir_load(m7)
     yield assert_equal, str(pf), "moving7_0010"
-    for test in standard_patch_amr(m7, _fields):
+    for test in small_patch_amr(m7, _fields):
         yield test
 
 g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
 @requires_pf(g30)
 def test_galaxy0030():
-    return
     pf = data_dir_load(g30)
     yield assert_equal, str(pf), "galaxy0030"
-    for test in standard_patch_amr(g30, _fields):
+    for test in big_patch_amr(g30, _fields):
         yield test


diff -r 41d3068adff918404b0d86a2ab0f258bfdd318ae -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -140,8 +140,6 @@
 
 class AnswerTestingTest(object):
     reference_storage = None
-
-    description = None
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 
@@ -150,11 +148,11 @@
         if self.reference_storage is not None:
             dd = self.reference_storage.get(str(self.pf))
             if dd is None: raise YTNoOldAnswer(str(self.pf))
-            ov = dd[self.name]
+            ov = dd[self.description]
             self.compare(nv, ov)
         else:
             ov = None
-        self.result_storage[str(self.pf)][self.name] = nv
+        self.result_storage[str(self.pf)][self.description] = nv
 
     def compare(self, new_result, old_result):
         raise RuntimeError
@@ -191,7 +189,7 @@
         return self.pf.h.all_data()
 
     @property
-    def name(self):
+    def description(self):
         obj_type = getattr(self, "obj_type", None)
         if obj_type is None:
             oname = "all"
@@ -212,7 +210,10 @@
 
     def run(self):
         obj = self.create_obj(self.pf, self.obj_type)
-        return obj[self.field]
+        avg = obj.quantities["WeightedAverageQuantity"](self.field,
+                             weight="Ones")
+        (mi, ma), = obj.quantities["Extrema"](self.field)
+        return np.array([avg, mi, ma])
 
     def compare(self, new_result, old_result):
         assert_equal(new_result, old_result)
@@ -246,6 +247,41 @@
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
+class PixelizedProjectionValuesTest(AnswerTestingTest):
+    _type_name = "PixelizedProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(PixelizedProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        frb = proj.to_frb((1.0, 'unitary'), 256)
+        frb[self.field]
+        if self.weight_field is not None:
+            frb[self.weight_field]
+        d = frb.data
+        d.update( dict( (("%s_sum" % f, proj[f].sum(dtype="float64"))
+                         for f in proj.field_data.keys()) ) )
+        return d
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_rel_equal(new_result[k], old_result[k], 10)
+
 class GridValuesTest(AnswerTestingTest):
     _type_name = "GridValues"
     _attrs = ("field",)
@@ -319,7 +355,7 @@
     else:
         return ftrue
 
-def standard_patch_amr(pf_fn, fields):
+def small_patch_amr(pf_fn, fields):
     if not can_run_pf(pf_fn): return
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
     yield GridHierarchyTest(pf_fn)
@@ -334,3 +370,17 @@
                         ds)
                 yield FieldValuesTest(
                         pf_fn, field, ds)
+
+def big_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)


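Note the change to FieldValuesTest.run above: it now stores a compact (weighted average, min, max) summary rather than the full field array, which keeps the per-test answers small enough to ship around. The same fingerprint as a standalone sketch:

import numpy as np

def field_fingerprint(values, weights=None):
    # Weighted average plus extrema instead of the full (large) array.
    w = np.ones_like(values) if weights is None else weights
    avg = (values * w).sum() / w.sum()
    return np.array([avg, values.min(), values.max()])

print field_fingerprint(np.array([1.0, 2.0, 3.0]))  # [ 2.  1.  3.]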

https://bitbucket.org/yt_analysis/yt/changeset/65afdb7dbe7d/
changeset:   65afdb7dbe7d
branch:      yt
user:        MatthewTurk
date:        2012-10-22 22:47:34
summary:     Merging
affected #:  28 files

diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
-include distribute_setup.py
+include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.hh *.h README* 
+recursive-include yt *.pyx *.pxd *.hh *.h README*


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -31,7 +31,7 @@
 from yt.funcs import *
 from yt.utilities.performance_counters import yt_counters, time_function
 try:
-    from yt.utilities.kdtree import \
+    from yt.utilities.kdtree.api import \
         chainHOP_tags_dens, \
         create_tree, fKD, find_nn_nearest_neighbors, \
         free_tree, find_chunk_nearest_neighbors


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -606,6 +606,7 @@
 
         if newProfile:
             mylog.info("Writing halo %d" % halo['id'])
+            if os.path.exists(filename): os.remove(filename)
             if filename.endswith('.h5'):
                 profile.write_out_h5(filename)
             else:
@@ -717,7 +718,9 @@
             Default=True.
         njobs : int
             The number of jobs over which to split the projections.  Set
-            to -1 so that each halo is done by a single processor.
+            to -1 so that each halo is done by a single processor.  Halo 
+            projections do not currently work in parallel, so this must 
+            be set to -1.
             Default: -1.
         dynamic : bool
             If True, distribute halos using a task queue.  If False,
@@ -731,6 +734,12 @@
 
         """
 
+        # Halo projections cannot run in parallel because they are done by 
+        # giving a data source to the projection object.
+        if njobs > 0:
+            mylog.warn("Halo projections cannot use more than one processor per halo, setting njobs to -1.")
+            njobs = -1
+        
         # Get list of halos for projecting.
         if halo_list == 'filtered':
             halo_projection_list = self.filtered_halos


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -30,7 +30,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import ParallelAnalysisInterface, parallel_blocking_call, parallel_root_only
 
 try:
-    from yt.utilities.kdtree import *
+    from yt.utilities.kdtree.api import *
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -237,6 +237,7 @@
     def __set_default_field_parameters(self):
         self.set_field_parameter("center",np.zeros(3,dtype='float64'))
         self.set_field_parameter("bulk_velocity",np.zeros(3,dtype='float64'))
+        self.set_field_parameter("normal",np.array([0,0,1],dtype='float64'))
 
     def _set_center(self, center):
         if center is None:


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -598,16 +598,16 @@
                     continue
             else:
                 nz_filter = None
-            mins.append(data[field][nz_filter].min())
-            maxs.append(data[field][nz_filter].max())
+            mins.append(np.nanmin(data[field][nz_filter]))
+            maxs.append(np.nanmax(data[field][nz_filter]))
         else:
             if this_filter.any():
                 if non_zero:
                     nz_filter = ((this_filter) &
                                  (data[field][this_filter] > 0.0))
                 else: nz_filter = this_filter
-                mins.append(data[field][nz_filter].min())
-                maxs.append(data[field][nz_filter].max())
+                mins.append(np.nanmin(data[field][nz_filter]))
+                maxs.append(np.nanmax(data[field][nz_filter]))
             else:
                 mins.append(1e90)
                 maxs.append(-1e90)

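The hunk above swaps .min()/.max() for np.nanmin/np.nanmax so a single NaN (e.g. RadialVelocity at a sphere's center, which the new test below exercises) no longer poisons the Extrema result:

import numpy as np

a = np.array([1.0, np.nan, 3.0])
print a.min()        # nan -- plain min()/max() propagate NaN
print np.nanmin(a)   # 1.0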

diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -160,7 +160,8 @@
             # required attrs
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
-                pf.hubble_constant = pf.cosmological_simulation = 0.0
+                pf.cosmological_simulation = 0.0
+            pf.hubble_constant = 0.7
             pf.domain_left_edge = np.zeros(3, 'float64')
             pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/tests/test_derived_quantities.py
--- /dev/null
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -0,0 +1,24 @@
+from yt.testing import *
+import numpy as np
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_extrema():
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(16, nprocs = nprocs, fields = ("Density",
+                "x-velocity", "y-velocity", "z-velocity"))
+        sp = pf.h.sphere("c", (0.25, '1'))
+        (mi, ma), = sp.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(sp["Density"])
+        yield assert_equal, ma, np.nanmax(sp["Density"])
+        dd = pf.h.all_data()
+        (mi, ma), = dd.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(dd["Density"])
+        yield assert_equal, ma, np.nanmax(dd["Density"])
+        sp = pf.h.sphere("max", (0.25, '1'))
+        yield assert_equal, np.any(np.isnan(sp["RadialVelocity"])), True
+        (mi, ma), = dd.quantities["Extrema"]("RadialVelocity")
+        yield assert_equal, mi, np.nanmin(dd["RadialVelocity"])
+        yield assert_equal, ma, np.nanmax(dd["RadialVelocity"])


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/tests/test_extract_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -0,0 +1,53 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_cut_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        r = dd.cut_region( [ "grid['Temperature'] > 0.5",
+                             "grid['Density'] < 0.75",
+                             "grid['x-velocity'] > 0.25" ])
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        t2 = (r["Temperature"] < 0.75)
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+
+def test_extract_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        r = dd.extract_region(t)
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        t2 = (r["Temperature"] < 0.75)
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+        t3 = (r["Temperature"] < 0.75)
+        r3 = r.extract_region( t3 )
+        yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
+        yield assert_equal, np.all(r3["Temperature"] < 0.75), True
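
The two selection paths exercised above are equivalent ways of subsetting a
data object; a minimal sketch, assuming a loaded pf:

    dd = pf.h.all_data()
    # cut_region evaluates the string expression grid by grid, lazily:
    r1 = dd.cut_region(["grid['Temperature'] > 0.5"])
    # extract_region takes a precomputed boolean mask over the whole object:
    r2 = dd.extract_region(dd["Temperature"] > 0.5)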


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/tests/test_fields.py
--- /dev/null
+++ b/yt/data_objects/tests/test_fields.py
@@ -0,0 +1,91 @@
+from yt.testing import *
+import numpy as np
+from yt.data_objects.field_info_container import \
+    FieldInfo
+import yt.data_objects.universal_fields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    np.seterr(all = 'ignore')
+
+_sample_parameters = dict(
+    axis = 0,
+    center = np.array((0.0, 0.0, 0.0)),
+    bulk_velocity = np.array((0.0, 0.0, 0.0)),
+    normal = np.array((0.0, 0.0, 1.0)),
+    cp_x_vec = np.array((1.0, 0.0, 0.0)),
+    cp_y_vec = np.array((0.0, 1.0, 0.0)),
+    cp_z_vec = np.array((0.0, 0.0, 1.0)),
+)
+
+_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
+
+def realistic_pf(fields, nprocs):
+    pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
+    pf.parameters["HydroMethod"] = "streaming"
+    pf.parameters["Gamma"] = 5.0/3.0
+    pf.parameters["EOSType"] = 1.0
+    pf.parameters["EOSSoundSpeed"] = 1.0
+    pf.conversion_factors["Time"] = 1.0
+    pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
+    pf.current_redshift = 0.0001
+    pf.hubble_constant = 0.7
+    for unit in mpc_conversion:
+        pf.units[unit+'h'] = pf.units[unit]
+        pf.units[unit+'cm'] = pf.units[unit]
+        pf.units[unit+'hcm'] = pf.units[unit]
+    return pf
+
+class TestFieldAccess(object):
+    description = None
+
+    def __init__(self, field_name, nproc):
+        # Note this should be a field name
+        self.field_name = field_name
+        self.description = "Accessing_%s_%s" % (field_name, nproc)
+        self.nproc = nproc
+
+    def __call__(self):
+        field = FieldInfo[self.field_name]
+        deps = field.get_dependencies()
+        fields = deps.requested + _base_fields
+        skip_grids = False
+        needs_spatial = False
+        for v in field.validators:
+            f = getattr(v, "fields", None)
+            if f: fields += f
+            if getattr(v, "ghost_zones", 0) > 0:
+                skip_grids = True
+            if hasattr(v, "ghost_zones"):
+                needs_spatial = True
+        pf = realistic_pf(fields, self.nproc)
+        # This gives unequal sized grids as well as subgrids
+        dd1 = pf.h.all_data()
+        dd2 = pf.h.all_data()
+        dd1.field_parameters.update(_sample_parameters)
+        dd2.field_parameters.update(_sample_parameters)
+        v1 = dd1[self.field_name]
+        conv = field._convert_function(dd1) or 1.0
+        if not needs_spatial:
+            assert_equal(v1, conv*field._function(field, dd2))
+        if not skip_grids:
+            for g in pf.h.grids:
+                g.field_parameters.update(_sample_parameters)
+                conv = field._convert_function(g) or 1.0
+                v1 = g[self.field_name]
+                g.clear_data()
+                g.field_parameters.update(_sample_parameters)
+                assert_equal(v1, conv*field._function(field, g))
+
+def test_all_fields():
+    for field in FieldInfo:
+        if field.startswith("CuttingPlane"): continue
+        if field.startswith("particle"): continue
+        if field.startswith("CIC"): continue
+        if field.startswith("WeakLensingConvergence"): continue
+        if FieldInfo[field].particle_type: continue
+        for nproc in [1, 4, 8]:
+            yield TestFieldAccess(field, nproc)
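
This relies on nose treating yielded callables as individual tests, with the
.description attribute naming each one in the report; a self-contained sketch
of the pattern:

    # Each yielded instance becomes one test case under nose.
    class CheckPositive(object):
        def __init__(self, n):
            self.description = "Checking_%s" % n  # name nose reports
            self.n = n
        def __call__(self):
            assert self.n > 0

    def test_values():
        for n in [1, 4, 8]:
            yield CheckPositive(n)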


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/tests/test_ortho_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -0,0 +1,25 @@
+from yt.testing import *
+
+def test_ortho_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
+
+    axes = ['x', 'y', 'z']
+    for ax, an in enumerate(axes):
+        ocoord = np.random.random(2)
+
+        my_oray = pf.h.ortho_ray(ax, ocoord)
+
+        my_axes = range(3)
+        del my_axes[ax]
+
+        # find the cells intersected by the ortho ray
+        my_all = pf.h.all_data()
+        my_cells = (np.abs(my_all[axes[my_axes[0]]] - ocoord[0]) <= 
+                    0.5 * dx[my_axes[0]]) & \
+                   (np.abs(my_all[axes[my_axes[1]]] - ocoord[1]) <= 
+                    0.5 * dx[my_axes[1]])
+
+        assert_equal(my_oray['Density'].sum(),
+                     my_all['Density'][my_cells].sum())


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/tests/test_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_rays.py
@@ -0,0 +1,31 @@
+from yt.testing import *
+
+def test_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+      pf.domain_dimensions
+
+    p1 = np.random.random(3)
+    p2 = np.random.random(3)
+
+    my_ray = pf.h.ray(p1, p2)
+    assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
+    ray_cells = my_ray['dts'] > 0
+
+    # find cells intersected by the ray
+    my_all = pf.h.all_data()
+    
+    dt = np.abs(dx / (p2 - p1))
+    tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
+                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
+                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
+    tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
+                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
+                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
+    tin = tin.max(axis=0)
+    tout = tout.min(axis=0)
+    my_cells = (tin < tout) & (tin < 1) & (tout > 0)
+
+    assert_rel_equal(ray_cells.sum(), my_cells.sum(), 14)
+    assert_rel_equal(my_ray['Density'][ray_cells].sum(),
+                     my_all['Density'][my_cells].sum(), 14)
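
The reference computation above is the standard slab method: the ray
p(t) = p1 + t*(p2 - p1) is clipped against each axis-aligned slab, and a cell
is hit when the per-axis parameter intervals overlap inside [0, 1].  A minimal
single-box sketch (assumes the ray is not parallel to any axis):

    import numpy as np
    def ray_hits_box(p1, p2, left, right):
        d = p2 - p1
        t0 = (left - p1) / d             # parameters where the ray crosses
        t1 = (right - p1) / d            # each pair of slab planes
        tin = np.minimum(t0, t1).max()   # last entry over the three axes
        tout = np.maximum(t0, t1).min()  # first exit over the three axes
        return (tin < tout) and (tin < 1.0) and (tout > 0.0)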


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -258,8 +258,11 @@
 
         """
         if isinstance(filenames, types.StringTypes):
+            pattern = filenames
             filenames = glob.glob(filenames)
             filenames.sort()
+            if len(filenames) == 0:
+                raise YTNoFilenamesMatchPattern(pattern)
         obj = cls(filenames[:], parallel = parallel)
         return obj
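
With this check, a pattern that matches nothing fails loudly at construction
time rather than with an opaque IndexError later; a hypothetical call
(assuming the from_filenames classmethod this hunk sits in):

    # Raises YTNoFilenamesMatchPattern immediately if the glob is empty.
    ts = TimeSeriesData.from_filenames("DD????/no_such_output_*")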
 


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -32,7 +32,7 @@
 
 from yt.funcs import *
 
-from yt.utilities.lib import CICDeposit_3, obtain_rvec
+from yt.utilities.lib import CICDeposit_3, obtain_rvec, obtain_rv_vec
 from yt.utilities.cosmology import Cosmology
 from field_info_container import \
     add_field, \
@@ -54,7 +54,19 @@
      kboltz, \
      G, \
      rho_crit_now, \
-     speed_of_light_cgs
+     speed_of_light_cgs, \
+     km_per_cm
+
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi
      
 # Note that, despite my newfound efforts to comply with PEP-8,
 # I violate it here in order to keep the name/func_name relationship
@@ -179,12 +191,8 @@
 
 def _VelocityMagnitude(field, data):
     """M{|v|}"""
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = np.zeros(3)
-    return ( (data["x-velocity"]-bulk_velocity[0])**2.0 + \
-             (data["y-velocity"]-bulk_velocity[1])**2.0 + \
-             (data["z-velocity"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
+    velocities = obtain_rv_vec(data)
+    return np.sqrt(np.sum(velocities**2,axis=0))
 add_field("VelocityMagnitude", function=_VelocityMagnitude,
           take_log=False, units=r"\rm{cm}/\rm{s}")
 
@@ -194,13 +202,6 @@
           function=_TangentialOverVelocityMagnitude,
           take_log=False)
 
-def _TangentialVelocity(field, data):
-    return np.sqrt(data["VelocityMagnitude"]**2.0
-                 - data["RadialVelocity"]**2.0)
-add_field("TangentialVelocity", 
-          function=_TangentialVelocity,
-          take_log=False, units=r"\rm{cm}/\rm{s}")
-
 def _Pressure(field, data):
     """M{(Gamma-1.0)*rho*E}"""
     return (data.pf["Gamma"] - 1.0) * \
@@ -223,14 +224,9 @@
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
       
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The spherical coordinates radius is simply the magnitude of the
-    ## coords vector.
-
-    return np.sqrt(np.sum(coords**2,axis=-1))
+    return get_sph_r(coords)
 
 def _Convert_sph_r_CGS(data):
    return data.convert("cm")
@@ -245,20 +241,9 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The angle (theta) with respect to the normal (J), is the arccos
-    ## of the dot product of the normal with the normalized coords
-    ## vector.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    JdotCoords = np.sum(J*coords,axis=-1)
-    
-    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
+    return get_sph_theta(coords, normal)
 
 add_field("sph_theta", function=_sph_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
@@ -269,54 +254,21 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
-    
-    ## We have freedom with respect to what axis (xprime) to define
-    ## the disk angle. Here I've chosen to use the axis that is
-    ## perpendicular to the normal and the y-axis. When normal ==
-    ## y-hat, then set xprime = z-hat. With this definition, when
-    ## normal == z-hat (as is typical), then xprime == x-hat.
-    ##
-    ## The angle is then given by the arctan of the ratio of the
-    ## yprime-component and the xprime-component of the coords vector.
+    coords = obtain_rvec(data)
 
-    xprime = np.cross([0.0,1.0,0.0],normal)
-    if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
-    yprime = np.cross(normal,xprime)
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
-    
-    Px = np.sum(Jx*coords,axis=-1)
-    Py = np.sum(Jy*coords,axis=-1)
-    
-    return np.arctan2(Py,Px)
+    return get_sph_phi(coords, normal)
 
 add_field("sph_phi", function=_sph_phi,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
 
-
-
 ### cylindrical coordinates: R (radius in the cylinder's plane)
 def _cyl_R(field, data):
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
       
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The cross product of the normal (J) with the coords vector
-    ## gives a vector of magnitude equal to the cylindrical radius.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    JcrossCoords = np.cross(J,coords)
-    return np.sqrt(np.sum(JcrossCoords**2,axis=-1))
+    return get_cyl_r(coords, normal)
 
 def _Convert_cyl_R_CGS(data):
    return data.convert("cm")
@@ -324,6 +276,9 @@
 add_field("cyl_R", function=_cyl_R,
          validators=[ValidateParameter("center"),ValidateParameter("normal")],
          convert_function = _Convert_cyl_R_CGS, units=r"\rm{cm}")
+add_field("cyl_RCode", function=_cyl_R,
+          validators=[ValidateParameter("center"),ValidateParameter("normal")],
+          units=r"Radius (code)")
 
 
 ### cylindrical coordinates: z (height above the cylinder's plane)
@@ -331,17 +286,9 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The dot product of the normal (J) with the coords vector gives
-    ## the cylindrical height.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    return np.sum(J*coords,axis=-1)  
+    return get_cyl_z(coords, normal)
 
 def _Convert_cyl_z_CGS(data):
    return data.convert("cm")
@@ -352,14 +299,17 @@
 
 
 ### cylindrical coordinates: theta (angle in the cylinder's plane)
-### [This is identical to the spherical coordinate's 'phi' angle.]
 def _cyl_theta(field, data):
-    return data['sph_phi']
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = obtain_rvec(data)
+
+    return get_cyl_theta(coords, normal)
 
 add_field("cyl_theta", function=_cyl_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
 
-
 ### The old field DiskAngle is the same as the spherical coordinates'
 ### 'theta' angle. I'm keeping DiskAngle for backwards compatibility.
 def _DiskAngle(field, data):
@@ -392,6 +342,54 @@
                       ValidateParameter("normal")],
           units=r"AU", display_field=False)
 
+def _cyl_RadialVelocity(field, data):
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)
+
+    theta = data['cyl_theta']
+
+    return get_cyl_r_component(velocities, theta, normal)
+
+def _cyl_RadialVelocityABS(field, data):
+    return np.abs(_cyl_RadialVelocity(field, data))
+def _Convert_cyl_RadialVelocityKMS(data):
+    return km_per_cm
+add_field("cyl_RadialVelocity", function=_cyl_RadialVelocity,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityABS", function=_cyl_RadialVelocityABS,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityKMS", function=_cyl_RadialVelocity,
+          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityKMSABS", function=_cyl_RadialVelocityABS,
+          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+
+def _cyl_TangentialVelocity(field, data):
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)
+    theta = data['cyl_theta']
+
+    return get_cyl_theta_component(velocities, theta, normal)
+
+def _cyl_TangentialVelocityABS(field, data):
+    return np.abs(_cyl_TangentialVelocity(field, data))
+def _Convert_cyl_TangentialVelocityKMS(data):
+    return km_per_cm
+add_field("cyl_TangentialVelocity", function=_cyl_TangentialVelocity,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityABS", function=_cyl_TangentialVelocityABS,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityKMS", function=_cyl_TangentialVelocity,
+          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityKMSABS", function=_cyl_TangentialVelocityABS,
+          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
 
 def _DynamicalTime(field, data):
     """
@@ -450,7 +448,7 @@
 
 # This is rho_total / rho_cr(z).
 def _Convert_Overdensity(data):
-    return 1 / (rho_crit_now * data.pf.hubble_constant**2 * 
+    return 1.0 / (rho_crit_now * data.pf.hubble_constant**2 * 
                 (1+data.pf.current_redshift)**3)
 add_field("Overdensity",function=_Matter_Density,
           convert_function=_Convert_Overdensity, units=r"")
@@ -470,8 +468,8 @@
     else:
         omega_baryon_now = 0.0441
     return data['Density'] / (omega_baryon_now * rho_crit_now * 
-                              (data.pf['CosmologyHubbleConstantNow']**2) * 
-                              ((1+data.pf['CosmologyCurrentRedshift'])**3))
+                              (data.pf.hubble_constant**2) * 
+                              ((1+data.pf.current_redshift)**3))
 add_field("Baryon_Overdensity", function=_Baryon_Overdensity, 
           units=r"")
 
@@ -640,13 +638,7 @@
           take_log=False, display_field=False)
 
 def obtain_velocities(data):
-    if data.has_field_parameter("bulk_velocity"):
-        bv = data.get_field_parameter("bulk_velocity")
-    else: bv = np.zeros(3, dtype='float64')
-    xv = data["x-velocity"] - bv[0]
-    yv = data["y-velocity"] - bv[1]
-    zv = data["z-velocity"] - bv[2]
-    return xv, yv, zv
+    return obtain_rv_vec(data)
 
 def _convertSpecificAngularMomentum(data):
     return data.convert("cm")
@@ -711,7 +703,7 @@
 #          convert_function=_convertSpecificAngularMomentum, vector_field=True,
 #          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter('center')])
 def _convertSpecificAngularMomentumKMSMPC(data):
-    return data.convert("mpc")/1e5
+    return km_per_cm*data.convert("mpc")
 #add_field("ParticleSpecificAngularMomentumKMSMPC",
 #          function=_ParticleSpecificAngularMomentum, particle_type=True,
 #          convert_function=_convertSpecificAngularMomentumKMSMPC, vector_field=True,
@@ -883,33 +875,32 @@
           display_name = "Radius (code)")
 
 def _RadialVelocity(field, data):
-    center = data.get_field_parameter("center")
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = np.zeros(3)
-    new_field = ( (data['x']-center[0])*(data["x-velocity"]-bulk_velocity[0])
-                + (data['y']-center[1])*(data["y-velocity"]-bulk_velocity[1])
-                + (data['z']-center[2])*(data["z-velocity"]-bulk_velocity[2])
-                )/data["RadiusCode"]
-    if np.any(np.isnan(new_field)): # to fix center = point
-        new_field[np.isnan(new_field)] = 0.0
-    return new_field
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)    
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_r_component(velocities, theta, phi, normal)
+
 def _RadialVelocityABS(field, data):
     return np.abs(_RadialVelocity(field, data))
 def _ConvertRadialVelocityKMS(data):
-    return 1e-5
+    return km_per_cm
 add_field("RadialVelocity", function=_RadialVelocity,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          units=r"\rm{cm}/\rm{s}")
 add_field("RadialVelocityABS", function=_RadialVelocityABS,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          units=r"\rm{cm}/\rm{s}")
 add_field("RadialVelocityKMS", function=_RadialVelocity,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
 add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
+
+def _TangentialVelocity(field, data):
+    return np.sqrt(data["VelocityMagnitude"]**2.0
+                 - data["RadialVelocity"]**2.0)
+add_field("TangentialVelocity", 
+          function=_TangentialVelocity,
+          take_log=False, units=r"\rm{cm}/\rm{s}")
 
 def _CuttingPlaneVelocityX(field, data):
     x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
@@ -1026,6 +1017,47 @@
           display_name=r"\rm{Magnetic}\/\rm{Energy}",
           units="\rm{ergs}\/\rm{cm}^{-3}")
 
+def _BPoloidal(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_theta_component(Bfields, theta, phi, normal)
+
+add_field("BPoloidal", function=_BPoloidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
+def _BToroidal(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    phi   = data['sph_phi']
+
+    return get_sph_phi_component(Bfields, phi, normal)
+
+add_field("BToroidal", function=_BToroidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
+def _BRadial(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_r_component(Bfields, theta, phi, normal)
+
+add_field("BRadial", function=_BPoloidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
 def _VorticitySquared(field, data):
     mylog.debug("Generating vorticity on %s", data)
     # We need to set up stencils
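
The new cylindrical and poloidal/toroidal fields orient themselves with the
"normal" field parameter, which the data_containers change above now defaults
to z-hat; a hedged usage sketch, assuming a loaded pf:

    import numpy as np
    sp = pf.h.sphere("c", (0.1, '1'))
    # Point the cylindrical decomposition along an arbitrary axis:
    sp.set_field_parameter("normal", np.array([0.0, 0.0, 1.0]))
    v_r   = sp["cyl_RadialVelocity"]      # cm/s, in-plane radial component
    v_tan = sp["cyl_TangentialVelocity"]  # cm/s, in-plane tangential component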


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -26,10 +26,10 @@
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
-    assert_string_equal
+    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
 
-def assert_rel_equal(a1, a2, decimels):
-    return assert_almost_equal(a1/a2, 1.0, decimels)
+def assert_rel_equal(a1, a2, decimals):
+    return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 
@@ -139,11 +139,16 @@
         ndims = [ndims, ndims, ndims]
     else:
         assert(len(ndims) == 3)
-    if negative:
-        offset = 0.5
-    else:
-        offset = 0.0
+    if not iterable(negative):
+        negative = [negative for f in fields]
+    assert(len(fields) == len(negative))
+    offsets = []
+    for n in negative:
+        if n:
+            offsets.append(0.5)
+        else:
+            offsets.append(0.0)
     data = dict((field, (np.random.random(ndims) - offset) * peak_value)
-                 for field in fields)
+                 for field,offset in zip(fields,offsets))
     ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
     return ug


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -68,9 +68,12 @@
 def evaluate_domain_decomposition(n_d, pieces, ldom):
     """ Evaluate longest to shortest edge ratio
        BEWARE: lots of magic here """
-    ideal_bsize = 3.0 * (pieces * np.product(n_d) ** 2) ** (1.0 / 3.0)
-    bsize = int(np.sum(
-        ldom / np.array(n_d, dtype=np.float64) * np.product(n_d)))
+    eff_dim = (n_d > 1).sum()
+    ideal_bsize = eff_dim * (pieces * np.product(n_d) ** (eff_dim - 1)
+                             ) ** (1.0 / eff_dim)
+    mask = np.where(n_d > 1)
+    nd_arr = np.array(n_d, dtype=np.float64)[mask]
+    bsize = int(np.sum(ldom[mask] / nd_arr * np.product(nd_arr)))
     load_balance = float(np.product(n_d)) / \
         (float(pieces) * np.product((n_d - 1) / ldom + 1))
 
@@ -134,23 +137,15 @@
 
 
 def split_array(tab, psize):
-    """ Split array into px*py*pz subarrays using internal numpy routine. """
-    temp = [np.array_split(array, psize[1], axis=1)
-            for array in np.array_split(tab, psize[2], axis=2)]
-    temp = [item for sublist in temp for item in sublist]
-    temp = [np.array_split(array, psize[0], axis=0) for array in temp]
-    temp = [item for sublist in temp for item in sublist]
-    return temp
-
-
-if __name__ == "__main__":
-
-    NPROC = 12
-    ARRAY = np.zeros((128, 128, 129))
-    BBOX = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
-
-    PROCS = get_psize(np.array(ARRAY.shape), NPROC)
-    LE, RE, DATA = decompose_array(ARRAY, PROCS, BBOX)
-
-    for idx in range(NPROC):
-        print LE[idx, :], RE[idx, :], DATA[idx].shape
+    """ Split array into px*py*pz subarrays. """
+    n_d = np.array(tab.shape, dtype=np.int64)
+    slices = []
+    for i in range(psize[0]):
+        for j in range(psize[1]):
+            for k in range(psize[2]):
+                piece = np.array((i, j, k), dtype=np.int64)
+                lei = n_d * piece / psize
+                rei = n_d * (piece + np.ones(3, dtype=np.int64)) / psize
+                slices.append(np.s_[lei[0]:rei[0], lei[1]:
+                                    rei[1], lei[2]:rei[2]])
+    return [tab[slc] for slc in slices]
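
The replacement computes slab edges with integer arithmetic, n_d * piece /
psize, so adjacent pieces differ by at most one cell along each axis; for a
128-cell axis split into 3 pieces, for example:

    n, pieces = 128, 3
    edges = [n * i // pieces for i in range(pieces + 1)]
    # edges == [0, 42, 85, 128]: slabs of 42, 43 and 43 cells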


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -147,6 +147,14 @@
         return "You must create an API key before uploading.  See " + \
                "https://data.yt-project.org/getting_started.html"
 
+class YTNoFilenamesMatchPattern(YTException):
+    def __init__(self, pattern):
+        self.pattern = pattern
+
+    def __str__(self):
+        return "No filenames were found to match the pattern: " + \
+               "'%s'" % (self.pattern)
+
 class YTNoOldAnswer(YTException):
     def __init__(self, path):
         self.path = path


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/kdtree/__init__.py
--- a/yt/utilities/kdtree/__init__.py
+++ b/yt/utilities/kdtree/__init__.py
@@ -1,1 +0,0 @@
-from fKDpy import *


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/kdtree/api.py
--- /dev/null
+++ b/yt/utilities/kdtree/api.py
@@ -0,0 +1,9 @@
+from fKDpy import \
+    chainHOP_tags_dens, \
+    create_tree, \
+    fKD, \
+    find_nn_nearest_neighbors, \
+    free_tree, \
+    find_chunk_nearest_neighbors
+
+


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -338,3 +338,47 @@
                     rg[2,i,j,k] = zg[i,j,k] - c[2]
         return rg
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def obtain_rv_vec(data):
+    # This is just to let the pointers exist and whatnot.  We can't cdef them
+    # inside conditionals.
+    cdef np.ndarray[np.float64_t, ndim=1] vxf
+    cdef np.ndarray[np.float64_t, ndim=1] vyf
+    cdef np.ndarray[np.float64_t, ndim=1] vzf
+    cdef np.ndarray[np.float64_t, ndim=2] rvf
+    cdef np.ndarray[np.float64_t, ndim=3] vxg
+    cdef np.ndarray[np.float64_t, ndim=3] vyg
+    cdef np.ndarray[np.float64_t, ndim=3] vzg
+    cdef np.ndarray[np.float64_t, ndim=4] rvg
+    cdef np.float64_t bv[3]
+    cdef int i, j, k
+    bulk_velocity = data.get_field_parameter("bulk_velocity")
+    if bulk_velocity == None:
+        bulk_velocity = np.zeros(3)
+    bv[0] = bulk_velocity[0]; bv[1] = bulk_velocity[1]; bv[2] = bulk_velocity[2]
+    if len(data['x-velocity'].shape) == 1:
+        # One dimensional data
+        vxf = data['x-velocity'].astype("float64")
+        vyf = data['y-velocity'].astype("float64")
+        vzf = data['z-velocity'].astype("float64")
+        rvf = np.empty((3, vxf.shape[0]), 'float64')
+        for i in range(vxf.shape[0]):
+            rvf[0, i] = vxf[i] - bv[0]
+            rvf[1, i] = vyf[i] - bv[1]
+            rvf[2, i] = vzf[i] - bv[2]
+        return rvf
+    else:
+        # Three dimensional data
+        vxg = data['x-velocity'].astype("float64")
+        vyg = data['y-velocity'].astype("float64")
+        vzg = data['z-velocity'].astype("float64")
+        rvg = np.empty((3, vxg.shape[0], vxg.shape[1], vxg.shape[2]), 'float64')
+        for i in range(vxg.shape[0]):
+            for j in range(vxg.shape[1]):
+                for k in range(vxg.shape[2]):
+                    rvg[0,i,j,k] = vxg[i,j,k] - bv[0]
+                    rvg[1,i,j,k] = vyg[i,j,k] - bv[1]
+                    rvg[2,i,j,k] = vzg[i,j,k] - bv[2]
+        return rvg
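
obtain_rv_vec returns a (3, ...) array of velocities relative to the
"bulk_velocity" field parameter (zero if unset); a hedged usage sketch:

    import numpy as np
    from yt.utilities.lib import obtain_rv_vec
    dd = pf.h.all_data()
    dd.set_field_parameter("bulk_velocity", np.array([1.0e5, 0.0, 0.0]))
    rv = obtain_rv_vec(dd)  # rv[0] == dd['x-velocity'] - 1.0e5, and so on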


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -233,49 +233,6 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def obtain_rvec(data):
-    # This is just to let the pointers exist and whatnot.  We can't cdef them
-    # inside conditionals.
-    cdef np.ndarray[np.float64_t, ndim=1] xf
-    cdef np.ndarray[np.float64_t, ndim=1] yf
-    cdef np.ndarray[np.float64_t, ndim=1] zf
-    cdef np.ndarray[np.float64_t, ndim=2] rf
-    cdef np.ndarray[np.float64_t, ndim=3] xg
-    cdef np.ndarray[np.float64_t, ndim=3] yg
-    cdef np.ndarray[np.float64_t, ndim=3] zg
-    cdef np.ndarray[np.float64_t, ndim=4] rg
-    cdef np.float64_t c[3]
-    cdef int i, j, k
-    center = data.get_field_parameter("center")
-    c[0] = center[0]; c[1] = center[1]; c[2] = center[2]
-    if len(data['x'].shape) == 1:
-        # One dimensional data
-        xf = data['x']
-        yf = data['y']
-        zf = data['z']
-        rf = np.empty((3, xf.shape[0]), 'float64')
-        for i in range(xf.shape[0]):
-            rf[0, i] = xf[i] - c[0]
-            rf[1, i] = yf[i] - c[1]
-            rf[2, i] = zf[i] - c[2]
-        return rf
-    else:
-        # Three dimensional data
-        xg = data['x']
-        yg = data['y']
-        zg = data['z']
-        rg = np.empty((3, xg.shape[0], xg.shape[1], xg.shape[2]), 'float64')
-        for i in range(xg.shape[0]):
-            for j in range(xg.shape[1]):
-                for k in range(xg.shape[2]):
-                    rg[0,i,j,k] = xg[i,j,k] - c[0]
-                    rg[1,i,j,k] = yg[i,j,k] - c[1]
-                    rg[2,i,j,k] = zg[i,j,k] - c[2]
-        return rg
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
 def kdtree_get_choices(np.ndarray[np.float64_t, ndim=3] data,
                        np.ndarray[np.float64_t, ndim=1] l_corner,
                        np.ndarray[np.float64_t, ndim=1] r_corner):


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/lib/tests/test_geometry_utils.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_geometry_utils.py
@@ -0,0 +1,30 @@
+from yt.testing import *
+from yt.utilities.lib import obtain_rvec, obtain_rv_vec
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_obtain_rvec():
+    pf = fake_random_pf(64, nprocs=8, fields=_fields, 
+           negative = [False, True, True, True])
+    
+    dd = pf.h.sphere((0.5,0.5,0.5), 0.2)
+
+    coords = obtain_rvec(dd)
+
+    r = np.sqrt(np.sum(coords*coords,axis=0))
+
+    assert_array_less(r.max(), 0.2)
+
+    assert_array_less(0.0, r.min())
+
+def test_obtain_rv_vec():
+    pf = fake_random_pf(64, nprocs=8, fields=_fields, 
+           negative = [False, True, True, True])
+
+    dd = pf.h.all_data()
+
+    vels = obtain_rv_vec(dd)
+
+    assert_array_equal(vels[0,:], dd['x-velocity'])
+    assert_array_equal(vels[1,:], dd['y-velocity'])
+    assert_array_equal(vels[2,:], dd['z-velocity'])


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -674,3 +674,191 @@
                   [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
     
     return R
+
+def get_ortho_basis(normal):
+    xprime = np.cross([0.0,1.0,0.0],normal)
+    if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
+    yprime = np.cross(normal,xprime)
+    zprime = normal
+    return (xprime, yprime, zprime)
+
+def get_sph_r(coords):
+    # The spherical coordinates radius is simply the magnitude of the
+    # coordinate vector.
+
+    return np.sqrt(np.sum(coords**2, axis=0))
+
+def resize_vector(vector,vector_array):
+    if len(vector_array.shape) == 4:
+        res_vector = np.resize(vector,(3,1,1,1))
+    else:
+        res_vector = np.resize(vector,(3,1))
+    return res_vector
+
+def get_sph_theta(coords, normal):
+    # The angle (theta) with respect to the normal (J), is the arccos
+    # of the dot product of the normal with the normalized coordinate
+    # vector.
+    
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    
+    J = np.tile(res_normal,tile_shape)
+
+    JdotCoords = np.sum(J*coords,axis=0)
+    
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=0)) )
+
+def get_sph_phi(coords, normal):
+    # We have freedom with respect to what axis (xprime) to define
+    # the disk angle. Here I've chosen to use the axis that is
+    # perpendicular to the normal and the y-axis. When normal ==
+    # y-hat, then set xprime = z-hat. With this definition, when
+    # normal == z-hat (as is typical), then xprime == x-hat.
+    #
+    # The angle is then given by the arctan of the ratio of the
+    # yprime-component and the xprime-component of the coordinate 
+    # vector.
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, coords)
+    res_yprime = resize_vector(yprime, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    Px = np.sum(Jx*coords,axis=0)
+    Py = np.sum(Jy*coords,axis=0)
+    
+    return np.arctan2(Py,Px)
+
+def get_cyl_r(coords, normal):
+    # The cross product of the normal (J) with a coordinate vector
+    # gives a vector of magnitude equal to the cylindrical radius.
+
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
+    
+    JcrossCoords = np.cross(J, coords, axisa=0, axisb=0, axisc=0)
+    return np.sqrt(np.sum(JcrossCoords**2, axis=0))
+
+def get_cyl_z(coords, normal):
+    # The dot product of the normal (J) with the coordinate vector 
+    # gives the cylindrical height.
+
+    res_normal = resize_vector(normal, coords)
+    
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
+
+    return np.sum(J*coords, axis=0)  
+
+def get_cyl_theta(coords, normal):
+    # This is identical to the spherical phi component
+
+    return get_sph_phi(coords, normal)
+
+
+def get_cyl_r_component(vectors, theta, normal):
+    # The r of a vector is the vector dotted with rhat
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    rhat = Jx*np.cos(theta) + Jy*np.sin(theta)
+
+    return np.sum(vectors*rhat,axis=0)
+
+def get_cyl_theta_component(vectors, theta, normal):
+    # The theta component of a vector is the vector dotted with thetahat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    thetahat = -Jx*np.sin(theta) + Jy*np.cos(theta)
+
+    return np.sum(vectors*thetahat, axis=0)
+
+def get_cyl_z_component(vectors, normal):
+    # The z component of a vector is the vector dotted with zhat
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    zhat = np.tile(res_zprime, tile_shape)
+
+    return np.sum(vectors*zhat, axis=0)
+
+def get_sph_r_component(vectors, theta, phi, normal):
+    # The r component of a vector is the vector dotted with rhat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
+
+    rhat = Jx*np.sin(theta)*np.cos(phi) + \
+           Jy*np.sin(theta)*np.sin(phi) + \
+           Jz*np.cos(theta)
+
+    return np.sum(vectors*rhat, axis=0)
+
+def get_sph_phi_component(vectors, phi, normal):
+    # The phi component of a vector is the vector dotted with phihat
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    phihat = -Jx*np.sin(phi) + Jy*np.cos(phi)
+
+    return np.sum(vectors*phihat, axis=0)
+
+def get_sph_theta_component(vectors, theta, phi, normal):
+    # The theta component of a vector is the vector dotted with thetahat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
+    
+    thetahat = Jx*np.cos(theta)*np.cos(phi) + \
+               Jy*np.cos(theta)*np.sin(phi) - \
+               Jz*np.sin(theta)
+
+    return np.sum(vectors*thetahat, axis=0)
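
Since (rhat, thetahat, phihat) form an orthonormal basis, the three component
projections must recover the full vector magnitude; a quick consistency sketch
using only the helpers defined above:

    import numpy as np
    normal = np.array([0.0, 0.0, 1.0])
    coords = np.random.random((3, 16)) - 0.5
    vecs = np.random.random((3, 16)) - 0.5
    theta = get_sph_theta(coords, normal)
    phi = get_sph_phi(coords, normal)
    v_r = get_sph_r_component(vecs, theta, phi, normal)
    v_t = get_sph_theta_component(vecs, theta, phi, normal)
    v_p = get_sph_phi_component(vecs, phi, normal)
    # |v|^2 == v_r^2 + v_t^2 + v_p^2, component by component
    assert np.allclose(np.sum(vecs**2, axis=0), v_r**2 + v_t**2 + v_p**2)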


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/tests/test_coordinate_conversions.py
--- /dev/null
+++ b/yt/utilities/tests/test_coordinate_conversions.py
@@ -0,0 +1,125 @@
+from yt.testing import *
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi
+
+# Randomly generated coordinates in the domain [[-1,1],[-1,1],[-1,1]]
+coords = np.array([[-0.41503037, -0.22102472, -0.55774212],
+                   [ 0.73828247, -0.17913899,  0.64076921],
+                   [ 0.08922066, -0.94254844, -0.61774511],
+                   [ 0.10173242, -0.95789145,  0.16294352],
+                   [ 0.73186508, -0.3109153 ,  0.75728738],
+                   [ 0.8757989 , -0.41475119, -0.57039201],
+                   [ 0.58040762,  0.81969082,  0.46759728],
+                   [-0.89983356, -0.9853683 , -0.38355343]]).T
+
+def test_spherical_coordinate_conversion():
+    normal = [0, 0, 1]
+    real_r =     [ 0.72950559,  0.99384957,  1.13047198,  0.97696269,  
+                   1.09807968,  1.12445067,  1.10788685,  1.38843954]
+    real_theta = [ 2.44113629,  0.87012028,  2.14891444,  1.4032274 ,  
+                   0.80979483,  2.10280198,  1.13507735,  1.85068416]
+    real_phi =   [-2.65224483, -0.23804243, -1.47641858, -1.46498842, 
+                  -0.40172325, -0.4422801 ,  0.95466734, -2.31085392]
+
+    calc_r = get_sph_r(coords)
+    calc_theta = get_sph_theta(coords, normal)
+    calc_phi = get_sph_phi(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_phi, real_phi)
+
+    normal = [1, 0, 0]
+    real_theta = [ 2.17598842,  0.73347681,  1.49179079,  1.46647589,  
+                   0.8412984 ,  0.67793705,  1.0193883 ,  2.27586987]
+    real_phi =   [-0.37729951, -2.86898397, -0.99063518, -1.73928995, 
+                   -2.75201227,-0.62870527,  2.08920872, -1.19959244]
+
+    calc_theta = get_sph_theta(coords, normal)
+    calc_phi = get_sph_phi(coords, normal)
+    
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_phi, real_phi)
+
+def test_cylindrical_coordinate_conversion():
+    normal = [0, 0, 1]
+    real_r =     [ 0.47021498,  0.75970506,  0.94676179,  0.96327853,  
+                   0.79516968,  0.96904193,  1.00437346,  1.3344104 ]    
+    real_theta = [-2.65224483, -0.23804243, -1.47641858, -1.46498842, 
+                  -0.40172325, -0.4422801 ,  0.95466734, -2.31085392]
+    real_z =     [-0.55774212,  0.64076921, -0.61774511,  0.16294352,
+                   0.75728738, -0.57039201,  0.46759728, -0.38355343]
+
+    calc_r = get_cyl_r(coords, normal)
+    calc_theta = get_cyl_theta(coords, normal)
+    calc_z = get_cyl_z(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_z, real_z)
+
+    normal = [1, 0, 0]
+    real_r =     [ 0.59994016,  0.66533898,  1.12694569,  0.97165149,
+                   0.81862843,  0.70524152,  0.94368441,  1.05738542]
+    real_theta = [-0.37729951, -2.86898397, -0.99063518, -1.73928995, 
+                  -2.75201227, -0.62870527,  2.08920872, -1.19959244]
+    real_z =     [-0.41503037,  0.73828247,  0.08922066,  0.10173242,
+                   0.73186508,  0.8757989 ,  0.58040762, -0.89983356]
+
+    calc_r = get_cyl_r(coords, normal)
+    calc_theta = get_cyl_theta(coords, normal)
+    calc_z = get_cyl_z(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_z, real_z)
+
+def test_spherical_coordinate_projections():
+    normal = [0, 0, 1]
+    theta = get_sph_theta(coords, normal)
+    phi = get_sph_phi(coords, normal)
+    zero = np.tile(0,coords.shape[1])
+
+    # Purely radial field
+    vecs = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+
+    # Purely toroidal field
+    vecs = np.array([-np.sin(phi), np.cos(phi), zero])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
+
+    # Purely poloidal field
+    vecs = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -np.sin(theta)])
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
+
+def test_cylindrical_coordinate_projections():
+    normal = [0, 0, 1]
+    theta = get_cyl_theta(coords, normal)
+    z = get_cyl_z(coords, normal)
+    zero = np.tile(0, coords.shape[1])
+
+    # Purely radial field
+    vecs = np.array([np.cos(theta), np.sin(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
+
+    # Purely toroidal field
+    vecs = np.array([-np.sin(theta), np.cos(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))
+
+    # Purely z field
+    vecs = np.array([zero, zero, z])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/tests/test_decompose.py
--- /dev/null
+++ b/yt/utilities/tests/test_decompose.py
@@ -0,0 +1,96 @@
+"""
+Test suite for cartesian domain decomposition.
+
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import assert_array_equal, assert_almost_equal
+import numpy as np
+import yt.utilities.decompose as dec
+
+
+def setup():
+    pass
+
+
+def test_psize_2d():
+    procs = dec.get_psize(np.array([5, 1, 7]), 6)
+    assert_array_equal(procs, np.array([3, 1, 2]))
+    procs = dec.get_psize(np.array([1, 7, 5]), 6)
+    assert_array_equal(procs, np.array([1, 2, 3]))
+    procs = dec.get_psize(np.array([7, 5, 1]), 6)
+    assert_array_equal(procs, np.array([2, 3, 1]))
+
+
+def test_psize_3d():
+    procs = dec.get_psize(np.array([33, 35, 37]), 12)
+    assert_array_equal(procs, np.array([3, 2, 2]))
+
+
+def test_decomposition_2d():
+    array = np.ones((7, 5, 1))
+    bbox = np.array([[-0.7, 0.0], [1.5, 2.0], [0.0, 0.7]])
+    ledge, redge, data = dec.decompose_array(array, np.array([2, 3, 1]), bbox)
+
+    assert_array_equal(data[1].shape, np.array([3, 2, 1]))
+
+    gold_le = np.array([
+                       [-0.7, 1.5, 0.0], [-0.7, 1.6, 0.0],
+                       [-0.7, 1.8, 0.0], [-0.4, 1.5, 0.0],
+                       [-0.4, 1.6, 0.0], [-0.4, 1.8, 0.0]
+                       ])
+    assert_almost_equal(ledge, gold_le, 8)
+
+    gold_re = np.array(
+        [[-0.4, 1.6, 0.7], [-0.4, 1.8, 0.7],
+         [-0.4, 2.0, 0.7], [0.0, 1.6, 0.7],
+         [0.0, 1.8, 0.7], [0.0, 2.0, 0.7]]
+    )
+    assert_almost_equal(redge, gold_re, 8)
+
+
+def test_decomposition_3d():
+    array = np.ones((33, 35, 37))
+    bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+
+    ledge, redge, data = dec.decompose_array(array, np.array([3, 2, 2]), bbox)
+    assert_array_equal(data[0].shape, np.array([11, 17, 18]))
+
+    gold_le = np.array(
+        [[0.00000, -1.50000, 1.00000], [0.00000, -1.50000, 1.72973],
+         [0.00000, -0.04286, 1.00000], [0.00000, -0.04286, 1.72973],
+         [0.33333, -1.50000, 1.00000], [0.33333, -1.50000, 1.72973],
+         [0.33333, -0.04286, 1.00000], [0.33333, -0.04286, 1.72973],
+         [0.66667, -1.50000, 1.00000], [0.66667, -1.50000, 1.72973],
+         [0.66667, -0.04286, 1.00000], [0.66667, -0.04286, 1.72973]]
+    )
+    assert_almost_equal(ledge, gold_le, 5)
+
+    gold_re = np.array(
+        [[0.33333, -0.04286, 1.72973], [0.33333, -0.04286, 2.50000],
+         [0.33333, 1.50000, 1.72973], [0.33333, 1.50000, 2.50000],
+         [0.66667, -0.04286, 1.72973], [0.66667, -0.04286, 2.50000],
+         [0.66667, 1.50000, 1.72973], [0.66667, 1.50000, 2.50000],
+         [1.00000, -0.04286, 1.72973], [1.00000, -0.04286, 2.50000],
+         [1.00000, 1.50000, 1.72973], [1.00000, 1.50000, 2.50000]]
+    )
+    assert_almost_equal(redge, gold_re, 5)


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/tests/test_kdtrees.py
--- a/yt/utilities/tests/test_kdtrees.py
+++ b/yt/utilities/tests/test_kdtrees.py
@@ -26,10 +26,7 @@
 from yt.testing import *
 
 try:
-    from yt.utilities.kdtree import \
-        chainHOP_tags_dens, \
-        create_tree, fKD, find_nn_nearest_neighbors, \
-        free_tree, find_chunk_nearest_neighbors
+    from yt.utilities.kdtree.api import *
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -364,39 +364,30 @@
 
 class StreamlineCallback(PlotCallback):
     _type_name = "streamlines"
-    def __init__(self, field_x, field_y, factor=6.0, nx=16, ny=16,
-                 xstart=(0,1), ystart=(0,1), nsample=256,
-                 start_at_xedge=False, start_at_yedge=False,
-                 plot_args=None):
+    def __init__(self, field_x, field_y, factor = 16,
+                 density = 1, arrowsize = 1, arrowstyle = None,
+                 color = None, normalize = False):
         """
-        annotate_streamlines(field_x, field_y, factor=6.0, nx=16, ny=16,
-                             xstart=(0,1), ystart=(0,1), nsample=256,
-                             start_at_xedge=False, start_at_yedge=False,
-                             plot_args=None):
+        annotate_streamlines(field_x, field_y, factor = 16, density = 1,
+                             arrowsize = 1, arrowstyle = None,
+                             color = None, normalize = False):
 
         Add streamlines to any plot, using the *field_x* and *field_y*
-        from the associated data, using *nx* and *ny* starting points
-        that are bounded by *xstart* and *ystart*.  To begin
-        streamlines from the left edge of the plot, set
-        *start_at_xedge* to True; for the bottom edge, use
-        *start_at_yedge*.  A line with the qmean vector magnitude will
-        cover 1.0/*factor* of the image.
+        from the associated data, sampling every *factor* data points as
+        'quiver' does.  *density* sets how densely the streamlines are drawn.
         """
         PlotCallback.__init__(self)
         self.field_x = field_x
         self.field_y = field_y
-        self.xstart = xstart
-        self.ystart = ystart
-        self.nsample = nsample
+        self.bv_x = self.bv_y = 0
         self.factor = factor
-        if start_at_xedge:
-            self.data_size = (1,ny)
-        elif start_at_yedge:
-            self.data_size = (nx,1)
-        else:
-            self.data_size = (nx,ny)
-        if plot_args is None: plot_args = {'color':'k', 'linestyle':'-'}
-        self.plot_args = plot_args
+        self.dens = density
+        self.arrowsize = arrowsize
+        if arrowstyle is None : arrowstyle='-|>'
+        self.arrowstyle = arrowstyle
+        if color is None : color = "#000000"
+        self.color = color
+        self.normalize = normalize
         
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -404,43 +395,31 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0]
-        ny = plot.image._A.shape[1]
+        nx = plot.image._A.shape[0] / self.factor
+        ny = plot.image._A.shape[1] / self.factor
         pixX = _MPL.Pixelize(plot.data['px'],
                              plot.data['py'],
                              plot.data['pdx'],
                              plot.data['pdy'],
-                             plot.data[self.field_x],
+                             plot.data[self.field_x] - self.bv_x,
                              int(nx), int(ny),
-                           (x0, x1, y0, y1),)
+                           (x0, x1, y0, y1),).transpose()
         pixY = _MPL.Pixelize(plot.data['px'],
                              plot.data['py'],
                              plot.data['pdx'],
                              plot.data['pdy'],
-                             plot.data[self.field_y],
+                             plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
-                           (x0, x1, y0, y1),)
-        r0 = np.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
-                      self.ystart[0]*ny:self.ystart[1]*ny:self.data_size[1]*1j]
-        lines = np.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
-        lines[0,:,:,:] = r0
-        mag = np.sqrt(pixX**2 + pixY**2)
-        scale = np.sqrt(nx*ny) / (self.factor * mag.mean())
-        dt = 1.0 / (self.nsample-1)
-        for i in range(1,self.nsample):
-            xt = lines[i-1,0,:,:]
-            yt = lines[i-1,1,:,:]
-            ix = np.maximum(np.minimum((xt).astype('int'), nx-1), 0)
-            iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
-            lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
-            lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
-        # scale into data units
-        lines[:,0,:,:] = lines[:,0,:,:] * (xx1 - xx0) / nx + xx0
-        lines[:,1,:,:] = lines[:,1,:,:] * (yy1 - yy0) / ny + yy0
-        for i in range(self.data_size[0]):
-            for j in range(self.data_size[1]):
-                plot._axes.plot(lines[:,0,i,j], lines[:,1,i,j],
-                                **self.plot_args)
+                           (x0, x1, y0, y1),).transpose()
+        X, Y = (np.linspace(xx0, xx1, nx, endpoint=True),
+                np.linspace(yy0, yy1, ny, endpoint=True))
+        if self.normalize:
+            nn = np.sqrt(pixX**2 + pixY**2)
+            pixX /= nn
+            pixY /= nn
+        plot._axes.streamplot(X, Y, pixX, pixY, density=self.dens,
+                              arrowsize=self.arrowsize,
+                              arrowstyle=self.arrowstyle,
+                              color=self.color)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)

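The callback now hands integration off to matplotlib's streamplot instead of hand-stepping through the vector field. A standalone sketch of the same call on a synthetic field (the data and figure setup are illustrative, not yt code):

    import numpy as np
    import matplotlib.pyplot as plt

    # A simple solid-body rotation field on a regular grid.
    Y, X = np.mgrid[-3:3:100j, -3:3:100j]
    U, V = -Y, X

    fig, ax = plt.subplots()
    # The same knobs the rewritten callback exposes: density, arrowsize,
    # arrowstyle, and color.
    ax.streamplot(X, Y, U, V, density=1, arrowsize=1,
                  arrowstyle='-|>', color='#000000')
    plt.show()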

diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -146,7 +146,8 @@
     @parallel_passthrough
     def _finalize_parallel(self,data):
         self.streamlines = self.comm.mpi_allreduce(self.streamlines, op='sum')
-        self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
+        if self.get_magnitude:
+            self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
         
     def _integrate_through_brick(self, node, stream, step,
                                  periodic=False, mag=None):

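The added guard reduces the magnitudes only when they were requested, since the attribute is never created otherwise. The same pattern in bare mpi4py, as a self-contained sketch:

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    get_magnitude = False  # mirrors the Streamlines flag

    # Every rank always contributes its piece of the mandatory result.
    local_streamlines = np.zeros((4, 3))
    local_streamlines[comm.rank % 4] = 1.0
    streamlines = comm.allreduce(local_streamlines, op=MPI.SUM)

    # The optional result is reduced only when it was actually computed.
    if get_magnitude:
        local_magnitudes = np.zeros(4)
        magnitudes = comm.allreduce(local_magnitudes, op=MPI.SUM)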


https://bitbucket.org/yt_analysis/yt/changeset/201708b64058/
changeset:   201708b64058
branch:      yt
user:        MatthewTurk
date:        2012-10-23 20:18:56
summary:     Fixing the typo (thanks, Nathan!), and adding a big_data option to
@requires_pf.
affected #:  3 files

diff -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 -r 201708b64058a3cbfe9c458c27655e67f6c399e2 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -8,4 +8,4 @@
 exclude=answer_testing
 with-xunit=1
 with-answer-testing=1
-#answer-compare=gold001
+answer-compare=gold001


diff -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 -r 201708b64058a3cbfe9c458c27655e67f6c399e2 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -43,7 +43,7 @@
         yield test
 
 g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
-@requires_pf(g30)
+@requires_pf(g30, big_data=True)
 def test_galaxy0030():
     pf = data_dir_load(g30)
     yield assert_equal, str(pf), "galaxy0030"


diff -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 -r 201708b64058a3cbfe9c458c27655e67f6c399e2 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -74,6 +74,9 @@
             my_hash = "UNKNOWN%s" % (time.time())
         parser.add_option("--answer-compare", dest="compare_name",
             default=None, help="The name against which we will compare")
+        parser.add_option("--answer-big-data", dest="big_data",
+            default=False, help="Should we run against big data, too?",
+            action="store_true")
         parser.add_option("--answer-name", dest="this_name",
             default=my_hash,
             help="The name we'll call this set of tests")
@@ -97,6 +100,8 @@
                 AnswerTestOpener(options.compare_name)
         self.answer_name = options.this_name
         self.store_results = options.store_results
+        global run_big_data
+        run_big_data = options.big_data
 
     def finalize(self, result):
         # This is where we dump our result storage up to Amazon, if we are able
@@ -148,7 +153,7 @@
         if self.reference_storage is not None:
             dd = self.reference_storage.get(str(self.pf))
             if dd is None: raise YTNoOldAnswer(str(self.pf))
-            ov = dd[self.descrption]
+            ov = dd[self.description]
             self.compare(nv, ov)
         else:
             ov = None
@@ -345,12 +350,15 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newc == oldc)
 
-def requires_pf(pf_fn):
+def requires_pf(pf_fn, big_data = False):
     def ffalse(func):
         return lambda: None
     def ftrue(func):
         return func
-    if not can_run_pf(pf_fn):
+    global run_big_data
+    if run_big_data == False and big_data == True:
+        return ffalse
+    elif not can_run_pf(pf_fn):
         return ffalse
     else:
         return ftrue

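requires_pf is a decorator factory: it decides at import time whether the dataset is available (and now whether big-data runs are enabled), then either passes the test function through or swaps in a no-op. A self-contained sketch of the gating pattern, with hypothetical names:

    run_big_data = False  # normally set from the --answer-big-data option

    def requires_resource(available, big_data=False):
        def ffalse(func):
            return lambda: None  # nose still sees a callable; it does nothing
        def ftrue(func):
            return func
        if big_data and not run_big_data:
            return ffalse
        return ftrue if available else ffalse

    @requires_resource(available=True, big_data=True)
    def test_expensive():
        assert 1 + 1 == 2

    test_expensive()  # a silent no-op, because run_big_data is False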


https://bitbucket.org/yt_analysis/yt/changeset/58b6bb1c8283/
changeset:   58b6bb1c8283
branch:      yt
user:        MatthewTurk
date:        2012-10-23 20:47:23
summary:     Splitting the answer testing into a separate plugin file, to allow it to be
imported directly in setup.py.
affected #:  3 files

diff -r 201708b64058a3cbfe9c458c27655e67f6c399e2 -r 58b6bb1c8283318125e9691a93d6b1ac6b715e2d setup.py
--- a/setup.py
+++ b/setup.py
@@ -170,4 +170,6 @@
     return
 
 if __name__ == '__main__':
+    if "nosetests" in sys.argv: 
+        from yt.utilities.answer_testing.plugin import AnswerTesting
     setup_package()


diff -r 201708b64058a3cbfe9c458c27655e67f6c399e2 -r 58b6bb1c8283318125e9691a93d6b1ac6b715e2d yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -29,12 +29,9 @@
 import contextlib
 import urllib2
 
+from .plugin import AnswerTesting, run_big_data
 from yt.testing import *
-from yt.utilities.command_line import get_yt_version
 from yt.config import ytcfg
-from yt.utilities.logger import \
-    disable_stream_logging
-from nose.plugins import Plugin
 from yt.mods import *
 import cPickle
 
@@ -63,63 +60,6 @@
         self.cache[pf_name] = rv
         return rv
 
-class AnswerTesting(Plugin):
-    name = "answer-testing"
-
-    def options(self, parser, env=os.environ):
-        super(AnswerTesting, self).options(parser, env=env)
-        try:
-            my_hash = get_yt_version()
-        except:
-            my_hash = "UNKNOWN%s" % (time.time())
-        parser.add_option("--answer-compare", dest="compare_name",
-            default=None, help="The name against which we will compare")
-        parser.add_option("--answer-big-data", dest="big_data",
-            default=False, help="Should we run against big data, too?",
-            action="store_true")
-        parser.add_option("--answer-name", dest="this_name",
-            default=my_hash,
-            help="The name we'll call this set of tests")
-        parser.add_option("--answer-store", dest="store_results",
-            default=False, action="store_true")
-
-    def configure(self, options, conf):
-        super(AnswerTesting, self).configure(options, conf)
-        if not self.enabled:
-            return
-        disable_stream_logging()
-        from yt.config import ytcfg
-        ytcfg["yt","__withintesting"] = "True"
-        AnswerTestingTest.result_storage = \
-            self.result_storage = defaultdict(dict)
-        if options.compare_name is not None:
-            # Now we grab from our S3 store
-            if options.compare_name == "latest":
-                options.compare_name = _latest
-            AnswerTestingTest.reference_storage = \
-                AnswerTestOpener(options.compare_name)
-        self.answer_name = options.this_name
-        self.store_results = options.store_results
-        global run_big_data
-        run_big_data = options.big_data
-
-    def finalize(self, result):
-        # This is where we dump our result storage up to Amazon, if we are able
-        # to.
-        if self.store_results is False: return
-        import boto
-        from boto.s3.key import Key
-        c = boto.connect_s3()
-        bucket = c.get_bucket("yt-answer-tests")
-        for pf_name in self.result_storage:
-            rs = cPickle.dumps(self.result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
-            if tk is not None: tk.delete()
-            k = Key(bucket)
-            k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
-            k.set_acl("public-read")
-
 @contextlib.contextmanager
 def temp_cwd(cwd):
     oldcwd = os.getcwd()
@@ -355,7 +295,6 @@
         return lambda: None
     def ftrue(func):
         return func
-    global run_big_data
     if run_big_data == False and big_data == True:
         return ffalse
     elif not can_run_pf(pf_fn):


diff -r 201708b64058a3cbfe9c458c27655e67f6c399e2 -r 58b6bb1c8283318125e9691a93d6b1ac6b715e2d yt/utilities/answer_testing/plugin.py
--- /dev/null
+++ b/yt/utilities/answer_testing/plugin.py
@@ -0,0 +1,97 @@
+"""
+Answer Testing using Nose as a starting point
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import logging
+import os
+import hashlib
+import contextlib
+import urllib2
+import cPickle
+from collections import defaultdict
+from nose.plugins import Plugin
+
+run_big_data = False
+
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=None, help="The name against which we will compare")
+        parser.add_option("--answer-big-data", dest="big_data",
+            default=False, help="Should we run against big data, too?",
+            action="store_true")
+        parser.add_option("--answer-name", dest="this_name",
+            default=None,
+            help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+        from yt.utilities.logger import disable_stream_logging
+        disable_stream_logging()
+        from yt.utilities.command_line import get_yt_version
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        if options.this_name is None: options.this_name = my_hash
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
+        from .framework import AnswerTestingTest, AnswerTestOpener
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
+        if options.compare_name is not None:
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
+        global run_big_data
+        run_big_data = options.big_data
+
+    def finalize(self, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+

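With the plugin in its own importable module, nose can also be handed an instance directly rather than discovering it through entry points. A sketch, using the module path as of this changeset:

    import nose
    from yt.utilities.answer_testing.plugin import AnswerTesting

    # Roughly `nosetests --with-answer-testing --answer-name=mytest yt`;
    # addplugins registers the plugin explicitly.
    nose.run(argv=["nosetests", "--with-answer-testing",
                   "--answer-name=mytest", "yt"],
             addplugins=[AnswerTesting()])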


https://bitbucket.org/yt_analysis/yt/changeset/0ff74ce4cb81/
changeset:   0ff74ce4cb81
branch:      yt
user:        MatthewTurk
date:        2012-10-23 20:50:16
summary:     Removing items from the answer testing __init__.py.  This may fix the issues
with Shining Panda.
affected #:  2 files

diff -r 58b6bb1c8283318125e9691a93d6b1ac6b715e2d -r 0ff74ce4cb81b2c7351b4313d0062fda89c99956 yt/utilities/answer_testing/__init__.py
--- a/yt/utilities/answer_testing/__init__.py
+++ b/yt/utilities/answer_testing/__init__.py
@@ -22,10 +22,3 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
-import runner
-import output_tests
-from runner import RegressionTestRunner
-
-from output_tests import RegressionTest, SingleOutputTest, \
-    MultipleOutputTest, YTStaticOutputTest, create_test


diff -r 58b6bb1c8283318125e9691a93d6b1ac6b715e2d -r 0ff74ce4cb81b2c7351b4313d0062fda89c99956 yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -57,8 +57,3 @@
     TestBooleanANDParticleQuantity, \
     TestBooleanORParticleQuantity, \
     TestBooleanNOTParticleQuantity
-
-try:
-    from .framework import AnswerTesting
-except ImportError:
-    raise



https://bitbucket.org/yt_analysis/yt/changeset/4bd73932fca9/
changeset:   4bd73932fca9
branch:      yt
user:        MatthewTurk
date:        2012-10-23 21:18:17
summary:     Moving the answer testing plugin back where it came from, to avoid the weird
imports.  I believe I now have a solution to running nosetests on Shining Panda.
affected #:  4 files

diff -r 0ff74ce4cb81b2c7351b4313d0062fda89c99956 -r 4bd73932fca9eacd0a9876f8970e10e06803cb28 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -7,5 +7,3 @@
 where=yt
 exclude=answer_testing
 with-xunit=1
-with-answer-testing=1
-answer-compare=gold001


diff -r 0ff74ce4cb81b2c7351b4313d0062fda89c99956 -r 4bd73932fca9eacd0a9876f8970e10e06803cb28 setup.py
--- a/setup.py
+++ b/setup.py
@@ -156,7 +156,7 @@
                             'yt = yt.utilities.command_line:run_main',
                       ],
                       'nose.plugins.0.10': [
-                            'answer-testing = yt.utilities.answer_testing.api:AnswerTesting'
+                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
                       ]
         },
         author="Matthew J. Turk",
@@ -170,6 +170,4 @@
     return
 
 if __name__ == '__main__':
-    if "nosetests" in sys.argv: 
-        from yt.utilities.answer_testing.plugin import AnswerTesting
     setup_package()

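nose finds the plugin by scanning the nose.plugins.0.10 setuptools group that setup.py registers above. A sketch of that discovery from the consumer side:

    import pkg_resources

    # Each entry point in this group resolves to a nose Plugin subclass;
    # once yt is installed, answer-testing shows up here.
    for ep in pkg_resources.iter_entry_points("nose.plugins.0.10"):
        print("%s -> %s" % (ep.name, ep.module_name))
        plugin_cls = ep.load()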

diff -r 0ff74ce4cb81b2c7351b4313d0062fda89c99956 -r 4bd73932fca9eacd0a9876f8970e10e06803cb28 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -28,18 +28,81 @@
 import hashlib
 import contextlib
 import urllib2
+import time
+from collections import defaultdict
 
-from .plugin import AnswerTesting, run_big_data
+from nose.plugins import Plugin
 from yt.testing import *
 from yt.config import ytcfg
 from yt.mods import *
 import cPickle
 
+from yt.utilities.logger import disable_stream_logging
+from yt.utilities.command_line import get_yt_version
+
 mylog = logging.getLogger('nose.plugins.answer-testing')
+run_big_data = False
 
-_latest = "SomeValue"
+_latest = "gold001"
 _url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
 
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=_latest, help="The name against which we will compare")
+        parser.add_option("--answer-big-data", dest="big_data",
+            default=False, help="Should we run against big data, too?",
+            action="store_true")
+        parser.add_option("--answer-name", dest="this_name",
+            default=None,
+            help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+        disable_stream_logging()
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        if options.this_name is None: options.this_name = my_hash
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
+        if options.compare_name is not None:
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
+        global run_big_data
+        run_big_data = options.big_data
+
+    def finalize(self, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
 class AnswerTestOpener(object):
     def __init__(self, reference_name):
         self.reference_name = reference_name


diff -r 0ff74ce4cb81b2c7351b4313d0062fda89c99956 -r 4bd73932fca9eacd0a9876f8970e10e06803cb28 yt/utilities/answer_testing/plugin.py
--- a/yt/utilities/answer_testing/plugin.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""
-Answer Testing using Nose as a starting point
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-import logging
-import os
-import hashlib
-import contextlib
-import urllib2
-import cPickle
-from collections import defaultdict
-from nose.plugins import Plugin
-
-run_big_data = False
-
-class AnswerTesting(Plugin):
-    name = "answer-testing"
-
-    def options(self, parser, env=os.environ):
-        super(AnswerTesting, self).options(parser, env=env)
-        parser.add_option("--answer-compare", dest="compare_name",
-            default=None, help="The name against which we will compare")
-        parser.add_option("--answer-big-data", dest="big_data",
-            default=False, help="Should we run against big data, too?",
-            action="store_true")
-        parser.add_option("--answer-name", dest="this_name",
-            default=None,
-            help="The name we'll call this set of tests")
-        parser.add_option("--answer-store", dest="store_results",
-            default=False, action="store_true")
-
-    def configure(self, options, conf):
-        super(AnswerTesting, self).configure(options, conf)
-        if not self.enabled:
-            return
-        from yt.utilities.logger import disable_stream_logging
-        disable_stream_logging()
-        from yt.utilities.command_line import get_yt_version
-        try:
-            my_hash = get_yt_version()
-        except:
-            my_hash = "UNKNOWN%s" % (time.time())
-        if options.this_name is None: options.this_name = my_hash
-        from yt.config import ytcfg
-        ytcfg["yt","__withintesting"] = "True"
-        from .framework import AnswerTestingTest, AnswerTestOpener
-        AnswerTestingTest.result_storage = \
-            self.result_storage = defaultdict(dict)
-        if options.compare_name is not None:
-            # Now we grab from our S3 store
-            if options.compare_name == "latest":
-                options.compare_name = _latest
-            AnswerTestingTest.reference_storage = \
-                AnswerTestOpener(options.compare_name)
-        self.answer_name = options.this_name
-        self.store_results = options.store_results
-        global run_big_data
-        run_big_data = options.big_data
-
-    def finalize(self, result):
-        # This is where we dump our result storage up to Amazon, if we are able
-        # to.
-        if self.store_results is False: return
-        import boto
-        from boto.s3.key import Key
-        c = boto.connect_s3()
-        bucket = c.get_bucket("yt-answer-tests")
-        for pf_name in self.result_storage:
-            rs = cPickle.dumps(self.result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
-            if tk is not None: tk.delete()
-            k = Key(bucket)
-            k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
-            k.set_acl("public-read")
-



https://bitbucket.org/yt_analysis/yt/changeset/a70531b61894/
changeset:   a70531b61894
branch:      yt
user:        MatthewTurk
date:        2012-10-23 23:39:04
summary:     Adding checks to assert_rel_equal for NaNs.  These may show up if you (for
instance) project something that's weighted with zero values.
affected #:  1 file

diff -r 4bd73932fca9eacd0a9876f8970e10e06803cb28 -r a70531b618946c8018b3954ac87f5df49b36f4b0 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -29,6 +29,15 @@
     assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
 
 def assert_rel_equal(a1, a2, decimals):
+    # We have nan checks in here because occasionally we have fields that get
+    # weighted without non-zero weights.  I'm looking at you, particle fields!
+    if isinstance(a1, np.ndarray):
+        assert(a1.size == a2.size)
+        # Mask out NaNs
+        a1[np.isnan(a1)] = 1.0
+        a2[np.isnan(a2)] = 1.0
+    elif np.isnan(a1) and np.isnan(a2):
+        return True
     return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):

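With these checks, arrays that are NaN in the same places now compare clean: both sides are masked to 1.0 before the elementwise ratio is taken, and two scalar NaNs are treated as equal outright. A short illustration (note that the masking mutates the inputs in place):

    import numpy as np
    from yt.testing import assert_rel_equal

    a = np.array([1.0, np.nan, 3.0])
    b = np.array([1.0, np.nan, 3.0 + 1e-12])

    # Previously nan/nan poisoned the comparison; now it passes.
    assert_rel_equal(a, b, 10)

    # Two scalar NaNs short-circuit to success.
    assert_rel_equal(np.nan, np.nan, 10)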


https://bitbucket.org/yt_analysis/yt/changeset/dd8b7e82f5ee/
changeset:   dd8b7e82f5ee
branch:      yt
user:        ngoldbaum
date:        2012-10-24 00:53:32
summary:     Merged in MatthewTurk/yt (pull request #308)
affected #:  11 files



diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 setup.py
--- a/setup.py
+++ b/setup.py
@@ -154,7 +154,11 @@
             'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
                             'yt = yt.utilities.command_line:run_main',
-                       ]},
+                      ],
+                      'nose.plugins.0.10': [
+                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+                      ]
+        },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
         url="http://yt-project.org/",


diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -54,6 +54,7 @@
     pasteboard_repo = '',
     reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
+    test_data_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',

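The new test_data_dir option tells the answer-testing machinery where datasets live; can_run_pf and data_dir_load read it when locating files such as DD0010/moving7_0010. It can also be set programmatically (the path here is hypothetical):

    from yt.config import ytcfg

    # Datasets are looked up relative to this directory.
    ytcfg["yt", "test_data_dir"] = "/data/yt_test_data"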

diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 yt/frontends/enzo/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -0,0 +1,51 @@
+"""
+Enzo frontend tests using moving7
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load
+from yt.frontends.enzo.api import EnzoStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
+           "particle_density")
+
+m7 = "DD0010/moving7_0010"
+@requires_pf(m7)
+def test_moving7():
+    pf = data_dir_load(m7)
+    yield assert_equal, str(pf), "moving7_0010"
+    for test in small_patch_amr(m7, _fields):
+        yield test
+
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+@requires_pf(g30, big_data=True)
+def test_galaxy0030():
+    pf = data_dir_load(g30)
+    yield assert_equal, str(pf), "galaxy0030"
+    for test in big_patch_amr(g30, _fields):
+        yield test

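Both functions are nose test generators: each yield is collected and reported as a separate test. The idiom, reduced to a self-contained sketch:

    from numpy.testing import assert_equal

    def small_suite(values):
        # Each yielded (callable, args...) tuple runs as its own nose test.
        for v in values:
            yield assert_equal, v * 0, 0

    def test_generated():
        for test in small_suite([1, 2, 3]):
            yield test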

diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -29,6 +29,15 @@
     assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
 
 def assert_rel_equal(a1, a2, decimals):
+    # We have nan checks in here because occasionally we have fields that get
+    # weighted without non-zero weights.  I'm looking at you, particle fields!
+    if isinstance(a1, np.ndarray):
+        assert(a1.size == a2.size)
+        # Mask out NaNs
+        a1[np.isnan(a1)] = 1.0
+        a2[np.isnan(a2)] = 1.0
+    elif np.isnan(a1) and np.isnan(a2):
+        return True
     return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):


diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 yt/utilities/answer_testing/__init__.py
--- a/yt/utilities/answer_testing/__init__.py
+++ b/yt/utilities/answer_testing/__init__.py
@@ -22,10 +22,3 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
-import runner
-import output_tests
-from runner import RegressionTestRunner
-
-from output_tests import RegressionTest, SingleOutputTest, \
-    MultipleOutputTest, YTStaticOutputTest, create_test




diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 yt/utilities/answer_testing/default_tests.py
--- a/yt/utilities/answer_testing/default_tests.py
+++ b/yt/utilities/answer_testing/default_tests.py
@@ -67,3 +67,4 @@
         for field in sorted(self.result):
             for p1, p2 in zip(self.result[field], old_result[field]):
                 self.compare_data_arrays(p1, p2, self.tolerance)
+


diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 yt/utilities/answer_testing/framework.py
--- /dev/null
+++ b/yt/utilities/answer_testing/framework.py
@@ -0,0 +1,396 @@
+"""
+Answer Testing using Nose as a starting point
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import logging
+import os
+import hashlib
+import contextlib
+import urllib2
+import cPickle
+
+from nose.plugins import Plugin
+from yt.testing import *
+from yt.config import ytcfg
+from yt.mods import *
+import time
+from collections import defaultdict
+
+from yt.utilities.logger import disable_stream_logging
+from yt.utilities.command_line import get_yt_version
+
+mylog = logging.getLogger('nose.plugins.answer-testing')
+run_big_data = False
+
+_latest = "gold001"
+_url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
+
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=_latest, help="The name against which we will compare")
+        parser.add_option("--answer-big-data", dest="big_data",
+            default=False, help="Should we run against big data, too?",
+            action="store_true")
+        parser.add_option("--answer-name", dest="this_name",
+            default=None,
+            help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+        disable_stream_logging()
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        if options.this_name is None: options.this_name = my_hash
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
+        if options.compare_name is not None:
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
+        global run_big_data
+        run_big_data = options.big_data
+
+    def finalize(self, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestOpener(object):
+    def __init__(self, reference_name):
+        self.reference_name = reference_name
+        self.cache = {}
+
+    def get(self, pf_name, default = None):
+        if pf_name in self.cache: return self.cache[pf_name]
+        url = _url_path % (self.reference_name, pf_name)
+        try:
+            resp = urllib2.urlopen(url)
+            # This is dangerous, but we have a controlled S3 environment
+            data = resp.read()
+            rv = cPickle.loads(data)
+        except urllib2.HTTPError as ex:
+            mylog.warning("Missing %s (%s)", url, ex)
+            raise YTNoOldAnswer(url)
+        self.cache[pf_name] = rv
+        return rv
+
+@contextlib.contextmanager
+def temp_cwd(cwd):
+    oldcwd = os.getcwd()
+    os.chdir(cwd)
+    yield
+    os.chdir(oldcwd)
+
+def can_run_pf(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        try:
+            load(pf_fn)
+        except:
+            return False
+    return AnswerTestingTest.result_storage is not None
+
+def data_dir_load(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        pf = load(pf_fn)
+        pf.h
+        return pf
+
+class AnswerTestingTest(object):
+    reference_storage = None
+    def __init__(self, pf_fn):
+        self.pf = data_dir_load(pf_fn)
+
+    def __call__(self):
+        nv = self.run()
+        if self.reference_storage is not None:
+            dd = self.reference_storage.get(str(self.pf))
+            if dd is None: raise YTNoOldAnswer(str(self.pf))
+            ov = dd[self.description]
+            self.compare(nv, ov)
+        else:
+            ov = None
+        self.result_storage[str(self.pf)][self.description] = nv
+
+    def compare(self, new_result, old_result):
+        raise RuntimeError
+
+    def create_obj(self, pf, obj_type):
+        # obj_type should be tuple of
+        #  ( obj_name, ( args ) )
+        if obj_type is None:
+            return pf.h.all_data()
+        cls = getattr(pf.h, obj_type[0])
+        obj = cls(*obj_type[1])
+        return obj
+
+    @property
+    def sim_center(self):
+        """
+        This returns the center of the domain.
+        """
+        return 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+
+    @property
+    def max_dens_location(self):
+        """
+        This is a helper function to return the location of the most dense
+        point.
+        """
+        return self.pf.h.find_max("Density")[1]
+
+    @property
+    def entire_simulation(self):
+        """
+        Return an unsorted array of values that cover the entire domain.
+        """
+        return self.pf.h.all_data()
+
+    @property
+    def description(self):
+        obj_type = getattr(self, "obj_type", None)
+        if obj_type is None:
+            oname = "all"
+        else:
+            oname = "_".join((str(s) for s in obj_type))
+        args = [self._type_name, str(self.pf), oname]
+        args += [str(getattr(self, an)) for an in self._attrs]
+        return "_".join(args)
+        
+class FieldValuesTest(AnswerTestingTest):
+    _type_name = "FieldValues"
+    _attrs = ("field", )
+
+    def __init__(self, pf_fn, field, obj_type = None):
+        super(FieldValuesTest, self).__init__(pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        avg = obj.quantities["WeightedAverageQuantity"](self.field,
+                             weight="Ones")
+        (mi, ma), = obj.quantities["Extrema"](self.field)
+        return np.array([avg, mi, ma])
+
+    def compare(self, new_result, old_result):
+        assert_equal(new_result, old_result)
+
+class ProjectionValuesTest(AnswerTestingTest):
+    _type_name = "ProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(ProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        return proj.field_data
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class PixelizedProjectionValuesTest(AnswerTestingTest):
+    _type_name = "PixelizedProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(PixelizedProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        frb = proj.to_frb((1.0, 'unitary'), 256)
+        frb[self.field]
+        if self.weight_field is not None:
+            frb[self.weight_field]
+        d = frb.data
+        d.update( dict( (("%s_sum" % f, proj[f].sum(dtype="float64"))
+                         for f in proj.field_data.keys()) ) )
+        return d
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_rel_equal(new_result[k], old_result[k], 10)
+
+class GridValuesTest(AnswerTestingTest):
+    _type_name = "GridValues"
+    _attrs = ("field",)
+
+    def __init__(self, pf_fn, field):
+        super(GridValuesTest, self).__init__(pf_fn)
+        self.field = field
+
+    def run(self):
+        hashes = {}
+        for g in self.pf.h.grids:
+            hashes[g.id] = hashlib.md5(g[self.field].tostring()).hexdigest()
+            g.clear_data()
+        return hashes
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class GridHierarchyTest(AnswerTestingTest):
+    _type_name = "GridHierarchy"
+    _attrs = ()
+
+    def run(self):
+        result = {}
+        result["grid_dimensions"] = self.pf.h.grid_dimensions
+        result["grid_left_edges"] = self.pf.h.grid_left_edge
+        result["grid_right_edges"] = self.pf.h.grid_right_edge
+        result["grid_levels"] = self.pf.h.grid_levels
+        result["grid_particle_count"] = self.pf.h.grid_particle_count
+        return result
+
+    def compare(self, new_result, old_result):
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class ParentageRelationshipsTest(AnswerTestingTest):
+    _type_name = "ParentageRelationships"
+    _attrs = ()
+    def run(self):
+        result = {}
+        result["parents"] = []
+        result["children"] = []
+        for g in self.pf.h.grids:
+            p = g.Parent
+            if p is None:
+                result["parents"].append(None)
+            elif hasattr(p, "id"):
+                result["parents"].append(p.id)
+            else:
+                result["parents"].append([pg.id for pg in p])
+            result["children"].append([c.id for c in g.Children])
+        return result
+
+    def compare(self, new_result, old_result):
+        for newp, oldp in zip(new_result["parents"], old_result["parents"]):
+            assert(newp == oldp)
+        for newc, oldc in zip(new_result["children"], old_result["children"]):
+            assert(newc == oldc)
+
+def requires_pf(pf_fn, big_data = False):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    if run_big_data == False and big_data == True:
+        return ffalse
+    elif not can_run_pf(pf_fn):
+        return ffalse
+    else:
+        return ftrue
+
+def small_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+
+def big_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)


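small_patch_amr fans a single dataset out into a full test matrix: hierarchy and parentage checks plus per-field grid, projection, and field-value tests over three axes, two data objects, and two weight fields. Counting what the generator yields for the five Enzo fields above:

    fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
              "particle_density")

    n_fields, n_axes, n_objs, n_weights = len(fields), 3, 2, 2

    grid_values = n_fields                                # 5
    projections = n_fields * n_axes * n_objs * n_weights  # 60
    field_values = n_fields * n_axes * n_objs             # 30
    total = 2 + grid_values + projections + field_values

    print(total)  # 97 answer tests per small-patch dataset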
diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -29,14 +29,12 @@
 # We first create our dictionary of tests to run.  This starts out empty, and
 # as tests are imported it will be filled.
 if "TestRegistry" not in locals():
-    print "Initializing TestRegistry"
     class TestRegistry(dict):
         def __new__(cls, *p, **k):
             if not '_the_instance' in cls.__dict__:
                 cls._the_instance = dict.__new__(cls)
                 return cls._the_instance
 if "test_registry" not in locals():
-    print "Initializing test_registry"
     test_registry = TestRegistry()
 
 # The exceptions we raise, related to the character of the failure.


diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -154,3 +154,11 @@
     def __str__(self):
         return "No filenames were found to match the pattern: " + \
                "'%s'" % (self.pattern)
+
+class YTNoOldAnswer(YTException):
+    def __init__(self, path):
+        self.path = path
+
+    def __str__(self):
+        return "There is no old answer available.\n" + \
+               str(self.path)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


