[yt-svn] commit/yt: 18 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Thu Dec 17 13:21:05 PST 2015


18 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/d3cf4f5d498a/
Changeset:   d3cf4f5d498a
Branch:      yt
User:        xarthisius
Date:        2015-12-02 01:00:58+00:00
Summary:     Add parallel nose runner, store all information for answer testing in a dedicated yaml file
Affected #:  2 files

diff -r 315acc8b8296a1655efbc5fa6dfc9c88fab44b62 -r d3cf4f5d498aca3118106178c44160e81b4b8187 tests/nose_runner.py
--- /dev/null
+++ b/tests/nose_runner.py
@@ -0,0 +1,30 @@
+import sys
+import os
+import yaml
+import multiprocessing as mp
+import nose
+from yt.config import ytcfg
+from yt.utilities.answer_testing.framework import AnswerTesting
+
+test_dir = ytcfg.get("yt", "test_data_dir")
+answers_dir = os.path.join(test_dir, "answers")
+
+with open('tests/tests_%i.%i.yaml' % sys.version_info[:2], 'r') as obj:
+    tests = yaml.load(obj)
+
+base_argv = ['fake', '--local-dir=%s' % answers_dir, '-v', '-s', '--nologcapture',
+             '--with-answer-testing', '--answer-big-data', '--local']
+
+def run_job(answer):
+    global base_argv, answers_dir
+    argv = base_argv
+    argv.append('--xunit-file=%s.xml' % answer)
+    argv.append('--answer-name=%s' % answer)
+    argv += tests[answer]
+    if not os.path.isdir(os.path.join(answers_dir, answer)):
+        nose.run(argv=argv + ['--answer-store'],
+                 addplugins=[AnswerTesting()], exit=False)
+    nose.run(argv=argv, addplugins=[AnswerTesting()], exit=False)
+
+pool = mp.Pool(processes=mp.cpu_count())
+results = pool.map(run_job, list(tests.keys()))

diff -r 315acc8b8296a1655efbc5fa6dfc9c88fab44b62 -r d3cf4f5d498aca3118106178c44160e81b4b8187 tests/tests_2.7.yaml
--- /dev/null
+++ b/tests/tests_2.7.yaml
@@ -0,0 +1,51 @@
+local_artio_301:
+  - yt/frontends/artio/tests/test_outputs.py
+
+local_athena_301:
+  - yt/frontends/athena
+
+local_chombo_302:
+  - yt/frontends/chombo/tests/test_outputs.py
+
+local_enzo_301:
+  - yt/frontends/enzo
+
+local_fits_301:
+  - yt/frontends/fits/tests/test_outputs.py
+
+local_flash_301:
+  - yt/frontends/flash/tests/test_outputs.py
+
+local_gadget_301:
+  - yt/frontends/gadget/tests/test_outputs.py
+
+local_halos_302:
+  - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
+  - yt/analysis_modules/halo_finding/tests/test_rockstar.py
+  - yt/frontends/owls_subfind/tests/test_outputs.py
+
+local_owls_303:
+  - yt/frontends/owls/tests/test_outputs.py
+
+local_pw_303:
+  - yt/visualization/tests/test_plotwindow.py:test_attributes
+  - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+
+local_tipsy_305:
+  - yt/frontends/tipsy/tests/test_outputs.py
+
+local_varia_304:
+  - yt/analysis_modules/radmc3d_export
+  - yt/frontends/moab/tests/test_c5.py
+  - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+  - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+  - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+
+local_orion_301:
+  - yt/frontends/boxlib/tests/test_orion.py
+
+local_ramses_301:
+  - yt/frontends/ramses/tests/test_outputs.py
+
+local_ytdata_301:
+  - yt/frontends/ytdata


https://bitbucket.org/yt_analysis/yt/commits/0c1329a8db9c/
Changeset:   0c1329a8db9c
Branch:      yt
User:        xarthisius
Date:        2015-12-02 02:18:00+00:00
Summary:     Fix argv initialization
Affected #:  1 file

diff -r d3cf4f5d498aca3118106178c44160e81b4b8187 -r 0c1329a8db9c8767b55d7a3605735172f6ed4add tests/nose_runner.py
--- a/tests/nose_runner.py
+++ b/tests/nose_runner.py
@@ -16,8 +16,9 @@
              '--with-answer-testing', '--answer-big-data', '--local']
 
 def run_job(answer):
-    global base_argv, answers_dir
-    argv = base_argv
+    global base_argv, answers_dir, tests
+    argv = []
+    argv += base_argv
     argv.append('--xunit-file=%s.xml' % answer)
     argv.append('--answer-name=%s' % answer)
     argv += tests[answer]
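
The bug fixed here is list aliasing: ``argv = base_argv`` binds a second name to the same list object, so each job's appended flags accumulated on the shared ``base_argv``. A minimal illustration of the difference:

    base_argv = ['-v', '-s']

    aliased = base_argv                 # same object, not a copy
    aliased.append('--local')
    assert base_argv == ['-v', '-s', '--local']   # original mutated

    copied = []
    copied += base_argv                 # independent copy, as in the fix
    copied.append('--answer-name=foo')
    assert '--answer-name=foo' not in base_argv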


https://bitbucket.org/yt_analysis/yt/commits/6822af47bc51/
Changeset:   6822af47bc51
Branch:      yt
User:        xarthisius
Date:        2015-12-02 02:58:59+00:00
Summary:     Create temporary files for image comparison in answer testing
Affected #:  1 file

diff -r 0c1329a8db9c8767b55d7a3605735172f6ed4add -r 6822af47bc510159a4f85d7d5fcca7265d95a796 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -671,7 +671,11 @@
                             err_msg=err_msg, verbose=True)
 
 def compare_image_lists(new_result, old_result, decimals):
-    fns = ['old.png', 'new.png']
+    fns = []
+    for i in range(2):
+        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+        os.close(tmpfd)
+        fns.append(tmpname)
     num_images = len(old_result)
     assert(num_images > 0)
     for i in range(num_images):
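
The pattern here: ``tempfile.mkstemp`` returns an open OS-level descriptor plus a unique path, and closing the descriptor immediately keeps the name usable without leaking the handle. A minimal sketch, with the cleanup step added for illustration:

    import os
    import tempfile

    fns = []
    for _ in range(2):
        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
        os.close(tmpfd)   # only the path is needed; the writer reopens it
        fns.append(tmpname)

    # ... save the "old" and "new" images to fns[0] and fns[1], compare ...

    for fn in fns:
        os.remove(fn)     # mkstemp files are not deleted automatically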


https://bitbucket.org/yt_analysis/yt/commits/c25fc3d4d895/
Changeset:   c25fc3d4d895
Branch:      yt
User:        xarthisius
Date:        2015-12-02 03:13:37+00:00
Summary:     Simplify nose_runner
Affected #:  1 file

diff -r 6822af47bc510159a4f85d7d5fcca7265d95a796 -r c25fc3d4d895ced3e9d2858bad114b5caeeef98f tests/nose_runner.py
--- a/tests/nose_runner.py
+++ b/tests/nose_runner.py
@@ -12,20 +12,25 @@
 with open('tests/tests_%i.%i.yaml' % sys.version_info[:2], 'r') as obj:
     tests = yaml.load(obj)
 
-base_argv = ['fake', '--local-dir=%s' % answers_dir, '-v', '-s', '--nologcapture',
+base_argv = ['--local-dir=%s' % answers_dir, '-v', '-s', '--nologcapture',
              '--with-answer-testing', '--answer-big-data', '--local']
-
-def run_job(answer):
-    global base_argv, answers_dir, tests
-    argv = []
+args = []
+for answer in list(tests.keys()):
+    argv = [answer]
     argv += base_argv
     argv.append('--xunit-file=%s.xml' % answer)
     argv.append('--answer-name=%s' % answer)
     argv += tests[answer]
+    args.append(argv)
+
+def run_job(argv):
+    answer = argv[0]
     if not os.path.isdir(os.path.join(answers_dir, answer)):
         nose.run(argv=argv + ['--answer-store'],
                  addplugins=[AnswerTesting()], exit=False)
     nose.run(argv=argv, addplugins=[AnswerTesting()], exit=False)
 
 pool = mp.Pool(processes=mp.cpu_count())
-results = pool.map(run_job, list(tests.keys()))
+results = pool.map(run_job, args)
+pool.close()
+pool.join()
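
The added ``close()``/``join()`` calls follow the standard ``multiprocessing.Pool`` shutdown sequence. A self-contained sketch of the same structure, with a stand-in for the real ``nose.run()`` job:

    import multiprocessing as mp

    def run_job(argv):
        return argv[0]            # stand-in for the real nose invocation

    if __name__ == '__main__':
        args = [['job_a'], ['job_b']]
        pool = mp.Pool(processes=mp.cpu_count())
        results = pool.map(run_job, args)
        pool.close()              # no further tasks may be submitted
        pool.join()               # wait for the worker processes to exit
        print(results)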


https://bitbucket.org/yt_analysis/yt/commits/fa997675c1dd/
Changeset:   fa997675c1dd
Branch:      yt
User:        xarthisius
Date:        2015-12-08 17:10:15+00:00
Summary:     Use mp.Process instead of mp.Pool
Affected #:  2 files

diff -r c25fc3d4d895ced3e9d2858bad114b5caeeef98f -r fa997675c1ddb6f400b1462126630dd7af4a31ea tests/nose_runner.py
--- a/tests/nose_runner.py
+++ b/tests/nose_runner.py
@@ -3,34 +3,46 @@
 import yaml
 import multiprocessing as mp
 import nose
+import glob
 from yt.config import ytcfg
 from yt.utilities.answer_testing.framework import AnswerTesting
 
-test_dir = ytcfg.get("yt", "test_data_dir")
-answers_dir = os.path.join(test_dir, "answers")
-
-with open('tests/tests_%i.%i.yaml' % sys.version_info[:2], 'r') as obj:
-    tests = yaml.load(obj)
-
-base_argv = ['--local-dir=%s' % answers_dir, '-v', '-s', '--nologcapture',
-             '--with-answer-testing', '--answer-big-data', '--local']
-args = []
-for answer in list(tests.keys()):
-    argv = [answer]
-    argv += base_argv
-    argv.append('--xunit-file=%s.xml' % answer)
-    argv.append('--answer-name=%s' % answer)
-    argv += tests[answer]
-    args.append(argv)
-
 def run_job(argv):
+    cur_stderr = sys.stderr
+    sys.stderr = open(str(os.getpid()) + ".out", "w")
     answer = argv[0]
+    test_dir = ytcfg.get("yt", "test_data_dir")
+    answers_dir = os.path.join(test_dir, "answers")
     if not os.path.isdir(os.path.join(answers_dir, answer)):
         nose.run(argv=argv + ['--answer-store'],
                  addplugins=[AnswerTesting()], exit=False)
     nose.run(argv=argv, addplugins=[AnswerTesting()], exit=False)
+    sys.stderr = cur_stderr
 
-pool = mp.Pool(processes=mp.cpu_count())
-results = pool.map(run_job, args)
-pool.close()
-pool.join()
+if __name__ == "__main__":
+    test_dir = ytcfg.get("yt", "test_data_dir")
+    answers_dir = os.path.join(test_dir, "answers")
+    with open('tests/tests_%i.%i.yaml' % sys.version_info[:2], 'r') as obj:
+        tests = yaml.load(obj)
+
+    base_argv = ['--local-dir=%s' % answers_dir, '-v', '-s', '--nologcapture',
+                 '--with-answer-testing', '--answer-big-data', '--local']
+    args = []
+    for answer in list(tests.keys()):
+        argv = [answer]
+        argv += base_argv
+        argv.append('--xunit-file=%s.xml' % answer)
+        argv.append('--answer-name=%s' % answer)
+        argv += tests[answer]
+        args.append(argv)
+    
+    processes = [mp.Process(target=run_job, args=(args[i],))
+                 for i in range(len(args))]
+    for p in processes:
+        p.start()
+    for p in processes:
+        p.join()
+    for fname in glob.glob("*.out"):
+        with open(fname, 'r') as fin:
+            print(fin.read())
+        os.remove(fname)
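
Each job now runs in its own ``mp.Process``, with its stderr redirected to a per-PID file that the parent prints once all workers have finished. A stripped-down sketch of that capture pattern (the worker body is a stand-in):

    import glob
    import multiprocessing as mp
    import os
    import sys

    def run_job(label):
        cur_stderr = sys.stderr
        sys.stderr = open('%d.out' % os.getpid(), 'w')
        sys.stderr.write('output from %s\n' % label)
        sys.stderr.close()
        sys.stderr = cur_stderr

    if __name__ == '__main__':
        processes = [mp.Process(target=run_job, args=('job_%d' % i,))
                     for i in range(2)]
        for p in processes:
            p.start()
        for p in processes:
            p.join()
        for fname in glob.glob('*.out'):
            with open(fname) as fin:
                print(fin.read())
            os.remove(fname)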

diff -r c25fc3d4d895ced3e9d2858bad114b5caeeef98f -r fa997675c1ddb6f400b1462126630dd7af4a31ea tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ b/tests/tests_2.7.yaml
@@ -1,51 +1,51 @@
-local_artio_301:
+local_artio_270:
   - yt/frontends/artio/tests/test_outputs.py
 
-local_athena_301:
+local_athena_270:
   - yt/frontends/athena
 
-local_chombo_302:
+local_chombo_270:
   - yt/frontends/chombo/tests/test_outputs.py
 
-local_enzo_301:
+local_enzo_270:
   - yt/frontends/enzo
 
-local_fits_301:
+local_fits_270:
   - yt/frontends/fits/tests/test_outputs.py
 
-local_flash_301:
+local_flash_270:
   - yt/frontends/flash/tests/test_outputs.py
 
-local_gadget_301:
+local_gadget_270:
   - yt/frontends/gadget/tests/test_outputs.py
 
-local_halos_302:
+local_halos_270:
   - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
   - yt/analysis_modules/halo_finding/tests/test_rockstar.py
   - yt/frontends/owls_subfind/tests/test_outputs.py
 
-local_owls_303:
+local_owls_270:
   - yt/frontends/owls/tests/test_outputs.py
 
-local_pw_303:
+local_pw_270:
   - yt/visualization/tests/test_plotwindow.py:test_attributes
   - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
 
-local_tipsy_305:
+local_tipsy_270:
   - yt/frontends/tipsy/tests/test_outputs.py
 
-local_varia_304:
+local_varia_270:
   - yt/analysis_modules/radmc3d_export
   - yt/frontends/moab/tests/test_c5.py
   - yt/analysis_modules/photon_simulator/tests/test_spectra.py
   - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
   - yt/visualization/volume_rendering/tests/test_vr_orientation.py
 
-local_orion_301:
+local_orion_270:
   - yt/frontends/boxlib/tests/test_orion.py
 
-local_ramses_301:
+local_ramses_270:
   - yt/frontends/ramses/tests/test_outputs.py
 
-local_ytdata_301:
+local_ytdata_270:
   - yt/frontends/ytdata


https://bitbucket.org/yt_analysis/yt/commits/c256f07de6f7/
Changeset:   c256f07de6f7
Branch:      yt
User:        xarthisius
Date:        2015-12-08 17:34:41+00:00
Summary:     merging
Affected #:  15 files

diff -r fa997675c1ddb6f400b1462126630dd7af4a31ea -r c256f07de6f7636657e7c2a911b759d6df5c4cbc doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -468,6 +468,19 @@
 
 All of these projections supply the data object as their base input.
 
+Often, it can be useful to sample a field at the minimum and maximum of a
+different field.  You can use the ``argmax`` and ``argmin`` operations to do
+this::
+
+  reg.argmin("density", axis="temperature")
+
+This will return the temperature at the minimum density.
+
+If you don't specify an ``axis``, it will return the spatial position of
+the extremum of the queried field.  Here is an example::
+
+  x, y, z = reg.argmin("density")
+
 Available Derived Quantities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -494,11 +507,15 @@
     | Usage: ``extrema(fields, non_zero=False)``
     | The extrema of a field or list of fields.
 
-**Maximum Location**
-    | Class :class:`~yt.data_objects.derived_quantities.MaxLocation`
-    | Usage: ``max_location(fields)``
-    | The maximum of a field or list of fields as well
-      as the x,y,z location of that maximum.
+**Maximum Location Sampling**
+    | Class :class:`~yt.data_objects.derived_quantities.SampleAtMaxFieldValues`
+    | Usage: ``sample_at_max_field_values(fields, sample_fields)``
+    | The value of sample_fields at the maximum value in fields.
+
+**Minimum Location Sampling**
+    | Class :class:`~yt.data_objects.derived_quantities.SampleAtMinFieldValues`
+    | Usage: ``sample_at_min_field_values(fields, sample_fields)``
+    | The value of sample_fields at the minimum value in fields.
 
 **Minimum Location**
     | Class :class:`~yt.data_objects.derived_quantities.MinLocation`
@@ -506,6 +523,12 @@
     | The minimum of a field or list of fields as well
       as the x,y,z location of that minimum.
 
+**Maximum Location**
+    | Class :class:`~yt.data_objects.derived_quantities.MaxLocation`
+    | Usage: ``max_location(fields)``
+    | The maximum of a field or list of fields as well
+      as the x,y,z location of that maximum.
+
 **Spin Parameter**
     | Class :class:`~yt.data_objects.derived_quantities.SpinParameter`
     | Usage: ``spin_parameter(use_gas=True, use_particles=True)``
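
A short usage sketch of the ``argmin``/``argmax`` behavior documented above, assuming the IsolatedGalaxy sample dataset used elsewhere in the docs:

    import yt

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    reg = ds.all_data()

    # Sample temperature at the location of minimum density:
    t_at_min_rho = reg.argmin('density', axis='temperature')

    # With no axis, the spatial position of the extremum is returned:
    x, y, z = reg.argmin('density')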

diff -r fa997675c1ddb6f400b1462126630dd7af4a31ea -r c256f07de6f7636657e7c2a911b759d6df5c4cbc doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:0dbaef644354e4d0191367f8f90e6dfd0d3d527925ef0331e1ef381c9099a8cd"
+  "signature": "sha256:6d823c3543f4183db8d28ad5003183515a69ce533fcfff00d92db0372afc3930"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -529,8 +529,21 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "`YTArray`s can be written to disk, to be loaded again to be used in yt or in a different context later. There are two formats that can be written to/read from: HDF5 and ASCII.  \n",
-      "\n",
+      "`YTArray`s can be written to disk, to be loaded again to be used in yt or in a different context later. There are two formats that can be written to/read from: HDF5 and ASCII.  "
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 4,
+     "metadata": {},
+     "source": [
+      "HDF5"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
       "To write to HDF5, use `write_hdf5`:"
      ]
     },
@@ -591,6 +604,38 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
+      "If you want to read/write a dataset from/to a specific group within the HDF5 file, use the `group_name` keyword:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "my_vels.write_hdf5(\"data_in_group.h5\", dataset_name=\"velocity\", info=info, group_name=\"/data/fields\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "where we have used the standard HDF5 slash notation for writing a group hierarchy (e.g., group within a group):"
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 4,
+     "metadata": {},
+     "source": [
+      "ASCII"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
       "To write one or more `YTArray`s to an ASCII text file, use `yt.savetxt`, which works a lot like NumPy's `savetxt`, except with units:"
      ]
     },

diff -r fa997675c1ddb6f400b1462126630dd7af4a31ea -r c256f07de6f7636657e7c2a911b759d6df5c4cbc doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -339,11 +339,11 @@
 .. code-block:: python
 
    #!python
-   class MaximumValue(AnswerTestingTest):
-       _type_name = "ParentageRelationships"
+   class MaximumValueTest(AnswerTestingTest):
+       _type_name = "MaximumValue"
        _attrs = ("field",)
        def __init__(self, ds_fn, field):
-           super(MaximumValue, self).__init__(ds_fn)
+           super(MaximumValueTest, self).__init__(ds_fn)
            self.field = field
 
        def run(self):
@@ -381,10 +381,10 @@
 * Typically for derived values, we compare to 10 or 12 decimal places.
   For exact values, we compare exactly.
 
-How to Add Data to the Testing Suite
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+How To Write Answer Tests for a Frontend
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-To add data to the testing suite, first write a new set of tests for the data.
+To add a new frontend answer test, first write a new set of tests for the data.
 The Enzo example in ``yt/frontends/enzo/tests/test_outputs.py`` is
 considered canonical.  Do these things:
 
@@ -399,8 +399,13 @@
   * This routine should test a number of different fields and data objects.
 
   * The test routine itself should be decorated with
-    ``@requires_ds(path_to_test_dataset)``. This decorator can accept the
-    argument ``big_data=True`` if the test is expensive.
+    ``@requires_ds(test_dataset_name)``. This decorator can accept the
+    argument ``big_data=True`` if the test is expensive. The 
+    ``test_dataset_name`` should be a string containing the path you would pass
+    to the ``yt.load`` function. It does not need to be the full path to the 
+    dataset, since the path will be automatically prepended with the location of
+    the test data directory.  See :ref:`configuration-file` for more information
+    about the ``test_data_dir`` configuration option.
 
   * There are ``small_patch_amr`` and ``big_patch_amr`` routines that you can
     yield from to execute a bunch of standard tests. In addition we have created
@@ -408,7 +413,59 @@
     you should start, and then yield additional tests that stress the outputs in
     whatever ways are necessary to ensure functionality.
 
-  * **All tests should be yielded!**
-
 If you are adding to a frontend that has a few tests already, skip the first
 two steps.
+
+How to Write Image Comparison Tests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We have a number of tests designed to compare images as part of yt. We make use
+of some functionality from matplotlib to automatically compare images and detect
+differences, if any. Image comparison tests are used in the plotting and volume
+rendering machinery.
+
+The easiest way to use the image comparison tests is to make use of the 
+``GenericImageTest`` class. This class takes three arguments:
+
+* A dataset instance (e.g. something you load with ``yt.load`` or 
+  ``data_dir_load``) 
+* A function the test machinery can call which will save an image to disk. The 
+  test class will then find any images that get created and compare them with the
+  stored "correct" answer.
+* An integer specifying the number of decimal places to use when comparing
+  images. A smaller number of decimal places will produce a less stringent test.
+  Matplotlib uses an L2 norm on the full image to do the comparison tests, so
+  this is not a pixel-by-pixel measure, and surprisingly large variations will
+  still pass the test if the strictness of the comparison is not high enough.
+
+You *must* decorate your test function with ``requires_ds``, otherwise the 
+answer testing machinery will not be properly set up.
+
+Here is an example test function:
+
+.. code-block:: python
+
+   from yt.utilities.answer_testing.framework import \
+       GenericImageTest, requires_ds, data_dir_load
+
+   from matplotlib import pyplot as plt
+
+   @requires_ds(my_ds)
+   def test_my_ds():
+       ds = data_dir_load(my_ds)
+       def create_image(filename_prefix):
+           plt.plot([1, 2], [1, 2])
+           plt.savefig(filename_prefix)
+       test = GenericImageTest(ds, create_image, 12)
+       # this ensures a nice test name in nose's output
+       test_my_ds.__description__ = test.description
+       yield test
+
+Another good example of an image comparison test is the
+``PlotWindowAttributeTest`` defined in the answer testing framework and used in
+``yt/visualization/tests/test_plotwindow.py``. This test shows how a new answer
+test subclass can be used to programmatically test a variety of different methods
+of a complicated class using the same test class. This sort of image comparison
+test is more useful if you are finding yourself writing a ton of boilerplate
+code to get your image comparison test working.  The ``GenericImageTest`` is
+more useful if you only need to do a one-off image comparison test.

diff -r fa997675c1ddb6f400b1462126630dd7af4a31ea -r c256f07de6f7636657e7c2a911b759d6df5c4cbc yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -123,7 +123,7 @@
     s_ds = halo.data_object.ds
     old_sphere = halo.data_object
     max_vals = old_sphere.quantities.max_location(field)
-    new_center = s_ds.arr(max_vals[2:])
+    new_center = s_ds.arr(max_vals[1:])
     new_sphere = s_ds.sphere(new_center.in_units("code_length"),
                                old_sphere.radius.in_units("code_length"))
     mylog.info("Moving sphere center from %s to %s." % (old_sphere.center,

diff -r fa997675c1ddb6f400b1462126630dd7af4a31ea -r c256f07de6f7636657e7c2a911b759d6df5c4cbc yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -87,7 +87,7 @@
         events1 = photons1.project_photons([1.0,-0.5,0.2], responses=[arf,rmf],
                                           absorb_model=tbabs_model, 
                                           convolve_energies=True, prng=prng)
-
+        events1['xsky']
         return_events = return_data(events1.events)
 
         tests.append(GenericArrayTest(ds, return_events, args=[a]))

diff -r fa997675c1ddb6f400b1462126630dd7af4a31ea -r c256f07de6f7636657e7c2a911b759d6df5c4cbc yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -613,10 +613,84 @@
 
     # Numpy-like Operations
     def argmax(self, field, axis=None):
-        raise NotImplementedError
+        r"""Return the values at which the field is maximized.
+
+        This will, in a parallel-aware fashion, find the maximum value and then
+        return to you the values at that maximum location that are requested
+        for "axis".  By default it will return the spatial positions (in the
+        natural coordinate system), but it can be any field
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to maximize.
+        axis : string or list of strings, optional
+            If supplied, the fields to sample along; if not supplied, defaults
+            to the coordinate fields.  This can be the name of the coordinate
+            fields (i.e., 'x', 'y', 'z') or a list of fields, but cannot be 0,
+            1, 2.
+
+        Returns
+        -------
+        A list of YTQuantities as specified by the axis argument.
+
+        Examples
+        --------
+
+        >>> temp_at_max_rho = reg.argmax("density", axis="temperature")
+        >>> max_rho_xyz = reg.argmax("density")
+        >>> t_mrho, v_mrho = reg.argmax("density", axis=["temperature",
+        ...                 "velocity_magnitude"])
+        >>> x, y, z = reg.argmax("density")
+
+        """
+        if axis is None:
+            mv, pos0, pos1, pos2 = self.quantities.max_location(field)
+            return pos0, pos1, pos2
+        rv = self.quantities.sample_at_max_field_values(field, axis)
+        if len(rv) == 2:
+            return rv[1]
+        return rv[1:]
 
     def argmin(self, field, axis=None):
-        raise NotImplementedError
+        r"""Return the values at which the field is minimized.
+
+        This will, in a parallel-aware fashion, find the minimum value and then
+        return to you the values at that minimum location that are requested
+        for "axis".  By default it will return the spatial positions (in the
+        natural coordinate system), but it can be any field
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to minimize.
+        axis : string or list of strings, optional
+            If supplied, the fields to sample along; if not supplied, defaults
+            to the coordinate fields.  This can be the name of the coordinate
+            fields (i.e., 'x', 'y', 'z') or a list of fields, but cannot be 0,
+            1, 2.
+
+        Returns
+        -------
+        A list of YTQuantities as specified by the axis argument.
+
+        Examples
+        --------
+
+        >>> temp_at_min_rho = reg.argmin("density", axis="temperature")
+        >>> min_rho_xyz = reg.argmin("density")
+        >>> t_mrho, v_mrho = reg.argmin("density", axis=["temperature",
+        ...                 "velocity_magnitude"])
+        >>> x, y, z = reg.argmin("density")
+
+        """
+        if axis is None:
+            mv, pos0, pos1, pos2 = self.quantities.min_location(field)
+            return pos0, pos1, pos2
+        rv = self.quantities.sample_at_min_field_values(field, axis)
+        if len(rv) == 2:
+            return rv[1]
+        return rv[1:]
 
     def _compute_extrema(self, field):
         if self._extrema_cache is None:

diff -r fa997675c1ddb6f400b1462126630dd7af4a31ea -r c256f07de6f7636657e7c2a911b759d6df5c4cbc yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -522,10 +522,57 @@
         return [self.data_source.ds.arr([mis.min(), mas.max()])
                 for mis, mas in zip(values[::2], values[1::2])]
 
-class MaxLocation(DerivedQuantity):
+class SampleAtMaxFieldValues(DerivedQuantity):
     r"""
-    Calculates the maximum value plus the index, x, y, and z position
-    of the maximum.
+    Calculates the maximum value and returns whichever fields are asked to be
+    sampled.
+
+    Parameters
+    ----------
+    field : field
+        The field over which the extrema are to be calculated.
+    sample_fields : list of fields
+        The fields to sample and return at the minimum value.
+
+    Examples
+    --------
+
+    >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    >>> ad = ds.all_data()
+    >>> print ad.quantities.sample_at_max_field_values(("gas", "density"),
+    ...         ["temperature", "velocity_magnitude"])
+
+    """
+    def count_values(self, field, sample_fields):
+        # field itself, then index, then the number of sample fields
+        self.num_vals = 1 + len(sample_fields)
+
+    def __call__(self, field, sample_fields):
+        rv = super(SampleAtMaxFieldValues, self).__call__(field, sample_fields)
+        if len(rv) == 1: rv = rv[0]
+        return rv
+
+    def process_chunk(self, data, field, sample_fields):
+        field = data._determine_fields(field)[0]
+        ma = array_like_field(data, -HUGE, field)
+        vals = [array_like_field(data, -1, sf) for sf in sample_fields]
+        maxi = -1
+        if data[field].size > 0:
+            maxi = self._func(data[field])
+            ma = data[field][maxi]
+            vals = [data[sf][maxi] for sf in sample_fields]
+        return (ma,) + tuple(vals)
+
+    def reduce_intermediate(self, values):
+        i = self._func(values[0]) # ma is values[0]
+        return [val[i] for val in values]
+
+    def _func(self, arr):
+        return np.argmax(arr)
+
+class MaxLocation(SampleAtMaxFieldValues):
+    r"""
+    Calculates the maximum value plus the x, y, and z position of the maximum.
 
     Parameters
     ----------
@@ -540,36 +587,39 @@
     >>> print ad.quantities.max_location(("gas", "density"))
 
     """
-    def count_values(self, *args, **kwargs):
-        self.num_vals = 5
-
     def __call__(self, field):
-        rv = super(MaxLocation, self).__call__(field)
+        sample_fields = get_position_fields(field, self.data_source)
+        rv = super(MaxLocation, self).__call__(field, sample_fields)
         if len(rv) == 1: rv = rv[0]
         return rv
 
-    def process_chunk(self, data, field):
-        field = data._determine_fields(field)[0]
-        ma = array_like_field(data, -HUGE, field)
-        position_fields = get_position_fields(field, data)
-        mx = array_like_field(data, -1, position_fields[0])
-        my = array_like_field(data, -1, position_fields[1])
-        mz = array_like_field(data, -1, position_fields[2])
-        maxi = -1
-        if data[field].size > 0:
-            maxi = np.argmax(data[field])
-            ma = data[field][maxi]
-            mx, my, mz = [data[ax][maxi] for ax in position_fields]
-        return (ma, maxi, mx, my, mz)
+class SampleAtMinFieldValues(SampleAtMaxFieldValues):
+    r"""
+    Calculates the minimum value and returns whichever fields are asked to be
+    sampled.
 
-    def reduce_intermediate(self, values):
-        i = np.argmax(values[0]) # ma is values[0]
-        return [val[i] for val in values]
+    Parameters
+    ----------
+    field : field
+        The field over which the extrema are to be calculated.
+    sample_fields : list of fields
+        The fields to sample and return at the minimum value.
 
-class MinLocation(DerivedQuantity):
+    Examples
+    --------
+
+    >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    >>> ad = ds.all_data()
+    >>> print ad.quantities.sample_at_min_field_values(("gas", "density"),
+    ...         ["temperature", "velocity_magnitude"])
+
+    """
+    def _func(self, arr):
+        return np.argmin(arr)
+
+class MinLocation(SampleAtMinFieldValues):
     r"""
-    Calculates the minimum value plus the index, x, y, and z position
-    of the minimum.
+    Calculates the minimum value plus the x, y, and z position of the minimum.
 
     Parameters
     ----------
@@ -584,32 +634,12 @@
     >>> print ad.quantities.min_location(("gas", "density"))
 
     """
-    def count_values(self, *args, **kwargs):
-        self.num_vals = 5
-
     def __call__(self, field):
-        rv = super(MinLocation, self).__call__(field)
+        sample_fields = get_position_fields(field, self.data_source)
+        rv = super(MinLocation, self).__call__(field, sample_fields)
         if len(rv) == 1: rv = rv[0]
         return rv
 
-    def process_chunk(self, data, field):
-        field = data._determine_fields(field)[0]
-        ma = array_like_field(data, HUGE, field)
-        position_fields = get_position_fields(field, data)
-        mx = array_like_field(data, -1, position_fields[0])
-        my = array_like_field(data, -1, position_fields[1])
-        mz = array_like_field(data, -1, position_fields[2])
-        mini = -1
-        if data[field].size > 0:
-            mini = np.argmin(data[field])
-            ma = data[field][mini]
-            mx, my, mz = [data[ax][mini] for ax in position_fields]
-        return (ma, mini, mx, my, mz)
-
-    def reduce_intermediate(self, values):
-        i = np.argmin(values[0]) # ma is values[0]
-        return [val[i] for val in values]
-
 class SpinParameter(DerivedQuantity):
     r"""
     Calculates the dimensionless spin parameter.
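
After this refactor, ``max_location``/``min_location`` return ``(value, x, y, z)`` with no flat index, and the new sampling quantities generalize them. A hedged usage sketch matching the docstrings above (again assuming the IsolatedGalaxy sample dataset):

    import yt

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    ad = ds.all_data()

    # Value at the density maximum, plus two fields sampled there:
    mv, temp, vmag = ad.quantities.sample_at_max_field_values(
        ('gas', 'density'), ['temperature', 'velocity_magnitude'])

    # max_location no longer returns the index, only value and position:
    max_val, mx, my, mz = ad.quantities.max_location(('gas', 'density'))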

diff -r fa997675c1ddb6f400b1462126630dd7af4a31ea -r c256f07de6f7636657e7c2a911b759d6df5c4cbc yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -625,7 +625,7 @@
         """
         mylog.debug("Searching for maximum value of %s", field)
         source = self.all_data()
-        max_val, maxi, mx, my, mz = \
+        max_val, mx, my, mz = \
             source.quantities.max_location(field)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f",
               max_val, mx, my, mz)
@@ -637,7 +637,7 @@
         """
         mylog.debug("Searching for minimum value of %s", field)
         source = self.all_data()
-        min_val, maxi, mx, my, mz = \
+        min_val, mx, my, mz = \
             source.quantities.min_location(field)
         mylog.info("Min Value is %0.5e at %0.16f %0.16f %0.16f",
               min_val, mx, my, mz)

diff -r fa997675c1ddb6f400b1462126630dd7af4a31ea -r c256f07de6f7636657e7c2a911b759d6df5c4cbc yt/data_objects/tests/test_derived_quantities.py
--- a/yt/data_objects/tests/test_derived_quantities.py
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -55,6 +55,68 @@
                         ad["cell_mass"].sum())
         yield assert_rel_equal, my_std, a_std, 12
 
+def test_max_location():
+    for nprocs in [1, 2, 4, 8]:
+        ds = fake_random_ds(16, nprocs = nprocs, fields = ("density", ))
+        ad = ds.all_data()
+
+        mv, x, y, z = ad.quantities.max_location(("gas", "density"))
+
+        yield assert_equal, mv, ad["density"].max()
+
+        mi = np.argmax(ad["density"])
+
+        yield assert_equal, ad["x"][mi], x
+        yield assert_equal, ad["y"][mi], y
+        yield assert_equal, ad["z"][mi], z
+
+def test_min_location():
+    for nprocs in [1, 2, 4, 8]:
+        ds = fake_random_ds(16, nprocs = nprocs, fields = ("density", ))
+        ad = ds.all_data()
+
+        mv, x, y, z = ad.quantities.min_location(("gas", "density"))
+
+        yield assert_equal, mv, ad["density"].min()
+
+        mi = np.argmin(ad["density"])
+
+        yield assert_equal, ad["x"][mi], x
+        yield assert_equal, ad["y"][mi], y
+        yield assert_equal, ad["z"][mi], z
+
+def test_sample_at_min_field_values():
+    for nprocs in [1, 2, 4, 8]:
+        ds = fake_random_ds(16, nprocs = nprocs,
+            fields = ("density", "temperature", "velocity_x"))
+        ad = ds.all_data()
+
+        mv, temp, vm = ad.quantities.sample_at_min_field_values(
+            "density", ["temperature", "velocity_x"])
+
+        yield assert_equal, mv, ad["density"].min()
+
+        mi = np.argmin(ad["density"])
+
+        yield assert_equal, ad["temperature"][mi], temp
+        yield assert_equal, ad["velocity_x"][mi], vm
+
+def test_sample_at_max_field_values():
+    for nprocs in [1, 2, 4, 8]:
+        ds = fake_random_ds(16, nprocs = nprocs,
+            fields = ("density", "temperature", "velocity_x"))
+        ad = ds.all_data()
+
+        mv, temp, vm = ad.quantities.sample_at_max_field_values(
+            "density", ["temperature", "velocity_x"])
+
+        yield assert_equal, mv, ad["density"].max()
+
+        mi = np.argmax(ad["density"])
+
+        yield assert_equal, ad["temperature"][mi], temp
+        yield assert_equal, ad["velocity_x"][mi], vm
+
 if __name__ == "__main__":
     for i in test_extrema():
         i[0](*i[1:])

diff -r fa997675c1ddb6f400b1462126630dd7af4a31ea -r c256f07de6f7636657e7c2a911b759d6df5c4cbc yt/data_objects/tests/test_numpy_ops.py
--- a/yt/data_objects/tests/test_numpy_ops.py
+++ b/yt/data_objects/tests/test_numpy_ops.py
@@ -1,4 +1,5 @@
 from yt.testing import fake_random_ds, fake_amr_ds, assert_equal
+import numpy as np
 
 
 def setup():
@@ -93,3 +94,51 @@
         qrho, qtemp = ad.min(["density", "temperature"])
         yield assert_equal, qrho, ad["density"].min()
         yield assert_equal, qtemp, ad["temperature"].min()
+
+def test_argmin():
+    for nprocs in [-1, 1, 2, 16]:
+        if nprocs == -1:
+            ds = fake_amr_ds(fields=("density","temperature"))
+        else:
+            ds = fake_random_ds(32, nprocs=nprocs,
+                fields=("density","temperature"))
+
+        ad = ds.all_data()
+
+        q = ad.argmin("density", axis=["density"])
+        yield assert_equal, q, ad["density"].min()
+
+        q1, q2 = ad.argmin("density", axis=["density", "temperature"])
+        mi = np.argmin(ad["density"])
+        yield assert_equal, q1, ad["density"].min()
+        yield assert_equal, q2, ad["temperature"][mi]
+
+        pos = ad.argmin("density")
+        mi = np.argmin(ad["density"])
+        yield assert_equal, pos[0], ad["x"][mi]
+        yield assert_equal, pos[1], ad["y"][mi]
+        yield assert_equal, pos[2], ad["z"][mi]
+
+def test_argmax():
+    for nprocs in [-1, 1, 2, 16]:
+        if nprocs == -1:
+            ds = fake_amr_ds(fields=("density","temperature"))
+        else:
+            ds = fake_random_ds(32, nprocs=nprocs,
+                fields=("density","temperature"))
+
+        ad = ds.all_data()
+
+        q = ad.argmax("density", axis=["density"])
+        yield assert_equal, q, ad["density"].max()
+
+        q1, q2 = ad.argmax("density", axis=["density", "temperature"])
+        mi = np.argmax(ad["density"])
+        yield assert_equal, q1, ad["density"].max()
+        yield assert_equal, q2, ad["temperature"][mi]
+
+        pos = ad.argmax("density")
+        mi = np.argmax(ad["density"])
+        yield assert_equal, pos[0], ad["x"][mi]
+        yield assert_equal, pos[1], ad["y"][mi]
+        yield assert_equal, pos[2], ad["z"][mi]

diff -r fa997675c1ddb6f400b1462126630dd7af4a31ea -r c256f07de6f7636657e7c2a911b759d6df5c4cbc yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -199,7 +199,7 @@
         if finest_levels is not False:
             source.min_level = self.max_level - finest_levels
         mylog.debug("Searching for maximum value of %s", field)
-        max_val, maxi, mx, my, mz = \
+        max_val, mx, my, mz = \
             source.quantities["MaxLocation"](field)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f",
                    max_val, mx, my, mz)

diff -r fa997675c1ddb6f400b1462126630dd7af4a31ea -r c256f07de6f7636657e7c2a911b759d6df5c4cbc yt/geometry/object_finding_mixin.py
--- a/yt/geometry/object_finding_mixin.py
+++ b/yt/geometry/object_finding_mixin.py
@@ -79,7 +79,7 @@
             source = self.all_data()
         mylog.debug("Searching %s grids for maximum value of %s",
                     len(source._grids), field)
-        max_val, maxi, mx, my, mz = \
+        max_val, mx, my, mz = \
             source.quantities["MaxLocation"]( field )
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f", 
               max_val, mx, my, mz)

diff -r fa997675c1ddb6f400b1462126630dd7af4a31ea -r c256f07de6f7636657e7c2a911b759d6df5c4cbc yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -925,6 +925,12 @@
     yield assert_equal, warr, iarr
     yield assert_equal, warr.units.registry['code_length'], iarr.units.registry['code_length']
 
+    warr.write_hdf5('test.h5', dataset_name="test_dset", group_name='/arrays/test_group')
+
+    giarr = YTArray.from_hdf5('test.h5', dataset_name="test_dset", group_name='/arrays/test_group')
+
+    yield assert_equal, warr, giarr
+
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
 

diff -r fa997675c1ddb6f400b1462126630dd7af4a31ea -r c256f07de6f7636657e7c2a911b759d6df5c4cbc yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -719,8 +719,8 @@
     # End unit conversion methods
     #
 
-    def write_hdf5(self, filename, dataset_name=None, info=None):
-        r"""Writes ImageArray to hdf5 file.
+    def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None):
+        r"""Writes a YTArray to hdf5 file.
 
         Parameters
         ----------
@@ -733,16 +733,17 @@
         info: dictionary
             A dictionary of supplementary info to write to append as attributes
             to the dataset.
+            
+        group_name: string
+            An optional group to write the arrays to. If not specified, the arrays
+            are datasets at the top level by default.
 
         Examples
         --------
         >>> a = YTArray([1,2,3], 'cm')
-
         >>> myinfo = {'field':'dinosaurs', 'type':'field_data'}
-
         >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
         ...              info=myinfo)
-
         """
         import h5py
         from yt.extern.six.moves import cPickle as pickle
@@ -756,8 +757,15 @@
             dataset_name = 'array_data'
 
         f = h5py.File(filename)
-        if dataset_name in f.keys():
-            d = f[dataset_name]
+        if group_name is not None:
+            if group_name in f:
+                g = f[group_name]
+            else:
+                g = f.create_group(group_name)
+        else:
+            g = f
+        if dataset_name in g.keys():
+            d = g[dataset_name]
             # Overwrite without deleting if we can get away with it.
             if d.shape == self.shape and d.dtype == self.dtype:
                 d[:] = self
@@ -765,16 +773,16 @@
                     del d.attrs[k]
             else:
                 del f[dataset_name]
-                d = f.create_dataset(dataset_name, data=self)
+                d = g.create_dataset(dataset_name, data=self)
         else:
-            d = f.create_dataset(dataset_name, data=self)
+            d = g.create_dataset(dataset_name, data=self)
 
         for k, v in info.items():
             d.attrs[k] = v
         f.close()
 
     @classmethod
-    def from_hdf5(cls, filename, dataset_name=None):
+    def from_hdf5(cls, filename, dataset_name=None, group_name=None):
         r"""Attempts read in and convert a dataset in an hdf5 file into a YTArray.
 
         Parameters
@@ -786,6 +794,10 @@
             The name of the dataset to read from.  If the dataset has a units
             attribute, attempt to infer units as well.
 
+        group_name: string
+            An optional group to read the arrays from. If not specified, the arrays
+            are datasets at the top level by default.
+
         """
         import h5py
         from yt.extern.six.moves import cPickle as pickle
@@ -794,7 +806,11 @@
             dataset_name = 'array_data'
 
         f = h5py.File(filename)
-        dataset = f[dataset_name]
+        if group_name is not None:
+            g = f[group_name]
+        else:
+            g = f
+        dataset = g[dataset_name]
         data = dataset[:]
         units = dataset.attrs.get('units', '')
         if 'unit_registry' in dataset.attrs.keys():
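
A roundtrip sketch of the new ``group_name`` keyword, mirroring the unit test added in this commit:

    from yt.units.yt_array import YTArray

    a = YTArray([1, 2, 3], 'cm')
    a.write_hdf5('test.h5', dataset_name='test_dset',
                 group_name='/arrays/test_group')
    b = YTArray.from_hdf5('test.h5', dataset_name='test_dset',
                          group_name='/arrays/test_group')
    assert (a == b).all()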


https://bitbucket.org/yt_analysis/yt/commits/3ac828d0201c/
Changeset:   3ac828d0201c
Branch:      yt
User:        xarthisius
Date:        2015-12-08 20:13:03+00:00
Summary:     Add timeout for forked processes
Affected #:  1 file

diff -r c256f07de6f7636657e7c2a911b759d6df5c4cbc -r 3ac828d0201c74d4c7ebeb581df77565c8559639 tests/nose_runner.py
--- a/tests/nose_runner.py
+++ b/tests/nose_runner.py
@@ -41,8 +41,11 @@
     for p in processes:
         p.start()
     for p in processes:
-        p.join()
+        p.join(timeout=7200)
+        if p.is_alive():
+            p.terminate()
+            p.join(timeout=30)
     for fname in glob.glob("*.out"):
         with open(fname, 'r') as fin:
             print(fin.read())
-        os.remove(fname)
+        os.remove(fname)
\ No newline at end of file
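
The ``join(timeout)``/``is_alive()``/``terminate()`` sequence added here is the standard way to bound a child process's runtime with ``multiprocessing``. A minimal self-contained sketch:

    import multiprocessing as mp
    import time

    def slow():
        time.sleep(10)

    if __name__ == '__main__':
        p = mp.Process(target=slow)
        p.start()
        p.join(timeout=1)         # wait at most one second
        if p.is_alive():          # still running past the deadline
            p.terminate()
            p.join(timeout=30)    # reap the terminated child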


https://bitbucket.org/yt_analysis/yt/commits/9bb31720a110/
Changeset:   9bb31720a110
Branch:      yt
User:        xarthisius
Date:        2015-12-08 21:19:50+00:00
Summary:     Add tests set for py34
Affected #:  2 files

diff -r 3ac828d0201c74d4c7ebeb581df77565c8559639 -r 9bb31720a110c0b225e41ca5be97ca818f95e7a1 tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ b/tests/tests_2.7.yaml
@@ -48,4 +48,4 @@
   - yt/frontends/ramses/tests/test_outputs.py
 
 local_ytdata_270:
-  - yt/frontends/ytdata
+  - yt/frontends/ytdata
\ No newline at end of file

diff -r 3ac828d0201c74d4c7ebeb581df77565c8559639 -r 9bb31720a110c0b225e41ca5be97ca818f95e7a1 tests/tests_3.4.yaml
--- /dev/null
+++ b/tests/tests_3.4.yaml
@@ -0,0 +1,49 @@
+local_artio_340:
+  - yt/frontends/artio/tests/test_outputs.py
+
+local_athena_340:
+  - yt/frontends/athena
+
+local_chombo_340:
+  - yt/frontends/chombo/tests/test_outputs.py
+
+local_enzo_340:
+  - yt/frontends/enzo
+
+local_fits_340:
+  - yt/frontends/fits/tests/test_outputs.py
+
+local_flash_340:
+  - yt/frontends/flash/tests/test_outputs.py
+
+local_gadget_340:
+  - yt/frontends/gadget/tests/test_outputs.py
+
+local_halos_340:
+  - yt/frontends/owls_subfind/tests/test_outputs.py
+
+local_owls_340:
+  - yt/frontends/owls/tests/test_outputs.py
+
+local_pw_340:
+  - yt/visualization/tests/test_plotwindow.py:test_attributes
+  - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+
+local_tipsy_340:
+  - yt/frontends/tipsy/tests/test_outputs.py
+
+local_varia_340:
+  - yt/analysis_modules/radmc3d_export
+  - yt/frontends/moab/tests/test_c5.py
+  - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+  - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+  - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+
+local_orion_340:
+  - yt/frontends/boxlib/tests/test_orion.py
+
+local_ramses_340:
+  - yt/frontends/ramses/tests/test_outputs.py
+
+local_ytdata_340:
+  - yt/frontends/ytdata
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/8749b4851a29/
Changeset:   8749b4851a29
Branch:      yt
User:        xarthisius
Date:        2015-12-08 22:19:40+00:00
Summary:     Add unittests runner
Affected #:  1 file

diff -r 9bb31720a110c0b225e41ca5be97ca818f95e7a1 -r 8749b4851a293d389b9e051cf901bac35fa2b91e tests/nose_runner.py
--- a/tests/nose_runner.py
+++ b/tests/nose_runner.py
@@ -27,7 +27,7 @@
 
     base_argv = ['--local-dir=%s' % answers_dir, '-v', '-s', '--nologcapture',
                  '--with-answer-testing', '--answer-big-data', '--local']
-    args = []
+    args = [['unittests', '-v', '-s', '--nologcapture']]
     for answer in list(tests.keys()):
         argv = [answer]
         argv += base_argv
@@ -48,4 +48,4 @@
     for fname in glob.glob("*.out"):
         with open(fname, 'r') as fin:
             print(fin.read())
-        os.remove(fname)
\ No newline at end of file
+        os.remove(fname)


https://bitbucket.org/yt_analysis/yt/commits/a3ac41259857/
Changeset:   a3ac41259857
Branch:      yt
User:        xarthisius
Date:        2015-12-16 15:56:49+00:00
Summary:     merging
Affected #:  95 files

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -22,4 +22,21 @@
 ngoldbau at ucsc.edu = goldbaum at ucolick.org
 biondo at wisc.edu = Biondo at wisc.edu
 samgeen at googlemail.com = samgeen at gmail.com
-fbogert = fbogert at ucsc.edu
\ No newline at end of file
+fbogert = fbogert at ucsc.edu
+bwoshea = oshea at msu.edu
+mornkr at slac.stanford.edu = me at jihoonkim.org
+kbarrow = kssbarrow at gatech.edu
+kssbarrow at gmail.com = kssbarrow at gatech.edu
+kassbarrow at gmail.com = kssbarrow at gatech.edu
+antoine.strugarek at cea.fr = strugarek at astro.umontreal.ca
+rosen at ucolick.org = alrosen at ucsc.edu
+jzuhone = jzuhone at gmail.com
+karraki at nmsu.edu = karraki at gmail.com
+hckr at eml.cc = astrohckr at gmail.com
+julian3 at illinois.edu = astrohckr at gmail.com
+cosmosquark = bthompson2090 at gmail.com
+chris.m.malone at lanl.gov = chris.m.malone at gmail.com
+jnaiman at ucolick.org = jnaiman
+migueld.deval = miguel at archlinux.net
+slevy at ncsa.illinois.edu = salevy at illinois.edu
+malzraa at gmail.com = kellerbw at mcmaster.ca
\ No newline at end of file

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -57,6 +57,11 @@
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
 yt/utilities/lib/write_array.c
+yt/utilities/lib/element_mappings.c
+yt/utilities/lib/mesh_construction.cpp
+yt/utilities/lib/mesh_samplers.cpp
+yt/utilities/lib/mesh_traversal.cpp
+yt/utilities/lib/mesh_intersection.cpp
 syntax: glob
 *.pyc
 .*.swp

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -4,20 +4,30 @@
                 Tom Abel (tabel at stanford.edu)
                 Gabriel Altay (gabriel.altay at gmail.com)
                 Kenza Arraki (karraki at gmail.com)
+                Kirk Barrow (kssbarrow at gatech.edu)
+                Ricarda Beckmann (Ricarda.Beckmann at astro.ox.ac.uk)
                 Elliott Biondo (biondo at wisc.edu)
                 Alex Bogert (fbogert at ucsc.edu)
+                André-Patrick Bubel (code at andre-bubel.de)
                 Pengfei Chen (madcpf at gmail.com)
                 David Collins (dcollins4096 at gmail.com)
                 Brian Crosby (crosby.bd at gmail.com)
                 Andrew Cunningham (ajcunn at gmail.com)
                 Miguel de Val-Borro (miguel.deval at gmail.com)
+                Bili Dong (qobilidop at gmail.com)
+                Nicholas Earl (nchlsearl at gmail.com)
                 Hilary Egan (hilaryye at gmail.com)
+                Daniel Fenn (df11c at my.fsu.edu)
                 John Forces (jforbes at ucolick.org)
+                Adam Ginsburg (keflavich at gmail.com)
                 Sam Geen (samgeen at gmail.com)
                 Nathan Goldbaum (goldbaum at ucolick.org)
+                William Gray (graywilliamj at gmail.com)
                 Markus Haider (markus.haider at uibk.ac.at)
                 Eric Hallman (hallman13 at gmail.com)
                 Cameron Hummels (chummels at gmail.com)
+                Anni Järvenpää (anni.jarvenpaa at gmail.com)
+                Allyson Julian (astrohckr at gmail.com)
                 Christian Karch (chiffre at posteo.de)
                 Ben W. Keller (kellerbw at mcmaster.ca)
                 Ji-hoon Kim (me at jihoonkim.org)
@@ -25,11 +35,15 @@
                 Kacper Kowalik (xarthisius.kk at gmail.com)
                 Mark Krumholz (mkrumhol at ucsc.edu)
                 Michael Kuhlen (mqk at astro.berkeley.edu)
+                Meagan Lang (langmm.astro at gmail.com)
+                Doris Lee (dorislee at berkeley.edu)
                 Eve Lee (elee at cita.utoronto.ca)
                 Sam Leitner (sam.leitner at gmail.com)
+                Stuart Levy (salevy at illinois.edu)
                 Yuan Li (yuan at astro.columbia.edu)
                 Chris Malone (chris.m.malone at gmail.com)
                 Josh Maloney (joshua.moloney at colorado.edu)
+                Jonah Miller (jonah.maxwell.miller at gmail.com)
                 Chris Moody (cemoody at ucsc.edu)
                 Stuart Mumford (stuart at mumford.me.uk)
                 Andrew Myers (atmyers at astro.berkeley.edu)
@@ -44,6 +58,7 @@
                 Mark Richardson (Mark.L.Richardson at asu.edu)
                 Thomas Robitaille (thomas.robitaille at gmail.com)
                 Anna Rosen (rosen at ucolick.org)
+                Chuck Rozhon (rozhon2 at illinois.edu)
                 Douglas Rudd (drudd at uchicago.edu)
                 Anthony Scopatz (scopatz at gmail.com)
                 Noel Scudder (noel.scudder at stonybrook.edu)
@@ -59,6 +74,7 @@
                 Ji Suoqing (jisuoqing at gmail.com)
                 Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
                 Benjamin Thompson (bthompson2090 at gmail.com)
+                Robert Thompson (rthompsonj at gmail.com)
                 Stephanie Tonnesen (stonnes at gmail.com)
                 Matthew Turk (matthewturk at gmail.com)
                 Rich Wagner (rwagner at physics.ucsd.edu)

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -327,6 +327,100 @@
 
 .. _loading-fits-data:
 
+Exodus II Data
+--------------
+
+Exodus II is a file format for Finite Element datasets that is used by the MOOSE
+framework for file IO. Support for this format (and for unstructured mesh data in 
+general) is a new feature as of yt 3.3, so while we aim to fully support it, we also expect 
+there to be some buggy features at present. Currently, yt can visualize first-order
+mesh types only (4-node quads, 8-node hexes, 3-node triangles, and 4-node tetrahedra).
+Development of higher-order visualization capability is a work in progress.
+
+To load an Exodus II dataset, you can use the ``yt.load`` command on the Exodus II
+file:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+
+Because Exodus II datasets can have multiple steps (which can correspond to time
+steps, Picard iterations, non-linear solve iterations, etc.), you can also specify
+a ``step`` argument when you load an Exodus II dataset; it defines the index at
+which to look when you read data from the file.
+
+You can access the connectivity information directly by doing:
+
+.. code-block:: python
+    
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   print(ds.index.meshes[0].connectivity_coords)
+   print(ds.index.meshes[0].connectivity_indices)
+   print(ds.index.meshes[1].connectivity_coords)
+   print(ds.index.meshes[1].connectivity_indices)
+
+This particular dataset has two meshes in it, both of which are made of 8-node hexes.
+yt uses a field name convention to access these different meshes in plots and data
+objects. To see all the fields found in a particular dataset, you can do:
+
+.. code-block:: python
+    
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   print(ds.field_list)
+
+This will give you a list of field names like ``('connect1', 'diffused')`` and 
+``('connect2', 'convected')``. Here, fields labelled with ``'connect1'`` correspond to the
+first mesh, and those with ``'connect2'`` to the second, and so on. To grab the value
+of the ``'convected'`` variable at all the nodes in the first mesh, for example, you
+would do:
+
+.. code-block:: python
+    
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ad = ds.all_data()  # geometric selection, this just grabs everything
+   print(ad['connect1', 'convected'])
+
+In this dataset, ``('connect1', 'convected')`` is a nodal field, meaning that the field values
+are defined at the vertices of the elements. If we examine the shape of the returned array:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ad = ds.all_data()
+   print(ad['connect1', 'convected'].shape)
+
+we see that this mesh has 12480 8-node hexahedral elements, and that we get 8 field values
+for each element. To get the vertex positions at which these field values are defined, we
+can do, for instance:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ad = ds.all_data()
+   print(ad['connect1', 'vertex_x'])
+
+If we instead look at an element-centered field, like ``('connect1', 'conv_indicator')``,
+we get:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ad = ds.all_data()
+   print(ad['connect1', 'conv_indicator'].shape)
+
+we instead get only one field value per element.
+
+For information about visualizing unstructured mesh data, including Exodus II datasets, 
+please see :ref:`unstructured-mesh-slices` and :ref:`unstructured_mesh_rendering`. 
+
+
 FITS Data
 ---------
 
@@ -1035,8 +1129,8 @@
 
 In addition to the above grid types, you can also load data stored on
 unstructured meshes. This type of mesh is used, for example, in many
-finite element calculations. Currently, hexahedral, tetrahedral, and
-wedge-shaped mesh element are supported.
+finite element calculations. Currently, hexahedral and tetrahedral
+mesh elements are supported.
 
 To load an unstructured mesh, you need to specify the following. First,
 you need to have a coordinates array, which should be an (L, 3) array

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c doc/source/reference/changelog.rst
--- a/doc/source/reference/changelog.rst
+++ b/doc/source/reference/changelog.rst
@@ -22,7 +22,7 @@
 * Late-stage beta support for Python 3 - unit tests and answer tests pass for 
   all the major frontends under python 3.4, and yt should now be mostly if not 
   fully usable.  Because many of the yt developers are still on Python 2 at 
-  this point, this should be considered a “late stage beta” as there may be 
+  this point, this should be considered a "late stage beta" as there may be 
   remaining issues yet to be identified or worked out.
 * Now supporting Gadget Friend-of-Friends/Subfind catalogs - see here to learn 
   how to load halo catalogs as regular yt datasets.

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -355,6 +355,78 @@
 keyword arguments, as described in
 :class:`~yt.visualization.plot_window.OffAxisProjectionPlot`
 
+.. _unstructured-mesh-slices:
+
+Unstructured Mesh Slices
+------------------------
+
+Unstructured Mesh datasets can be sliced using the same syntax as above.
+Here is an example script using a publicly available MOOSE dataset:
+
+.. python-script::
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
+   sl = yt.SlicePlot(ds, 'x', ('connect1', 'diffused'))
+   sl.zoom(0.75)
+   sl.save()
+
+Here, we plot the ``'diffused'`` variable, using a slice normal to the ``'x'`` direction, 
+through the mesh labelled by ``'connect1'``. By default, the slice goes through the
+center of the domain. We have also zoomed out a bit to get a better view of the 
+resulting structure. To instead plot the ``'convected'`` variable, using a slice normal
+to the ``'z'`` direction through the mesh labelled by ``'connect2'``, we do:
+
+.. python-script::
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
+   sl = yt.SlicePlot(ds, 'z', ('connect2', 'convected'))
+   sl.zoom(0.75)
+   sl.save()
+
+These slices are made by sampling the finite element solution at the points corresponding
+to each pixel of the image. The ``'convected'`` and ``'diffused'`` variables are node-centered,
+so this interpolation is performed by converting each sample point to the reference
+coordinate system of the element and evaluating the appropriate shape functions.
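+To make this mapping concrete, here is a minimal sketch (an illustration for this
+document, not yt's internal implementation) of how a node-centered field is
+interpolated once a sample point has been mapped to the reference coordinates of an
+8-node hexahedron:
+
+.. code-block:: python
+
+   import numpy as np
+
+   # corner signs of the reference hexahedron, in the standard
+   # trilinear ordering
+   SIGNS = np.array([[-1, -1, -1], [1, -1, -1], [1, 1, -1], [-1, 1, -1],
+                     [-1, -1, 1], [1, -1, 1], [1, 1, 1], [-1, 1, 1]])
+
+   def hex8_shape_functions(xi, eta, zeta):
+       # N_i = (1 + xi*xi_i)(1 + eta*eta_i)(1 + zeta*zeta_i) / 8
+       return 0.125 * (1 + xi * SIGNS[:, 0]) * \
+           (1 + eta * SIGNS[:, 1]) * (1 + zeta * SIGNS[:, 2])
+
+   nodal_values = np.linspace(0.0, 2.0, 8)  # stand-in for 8 nodal samples
+   N = hex8_shape_functions(0.0, 0.0, 0.0)  # sample at the element center
+   print(np.dot(N, nodal_values))  # the shape functions sum to one
+
+You can also plot element-centered fields: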
+
+.. python-script::
+
+   import yt
+   ds = yt.load('MOOSE_sample_data/out.e-s010')
+   sl = yt.SlicePlot(ds, 'y', ('connect1', 'conv_indicator'))
+   sl.zoom(0.75)
+   sl.save()
+
+We can also annotate the mesh lines, as follows:
+
+.. python-script::
+
+   import yt
+   ds = yt.load('MOOSE_sample_data/out.e-s010')
+   sl = yt.SlicePlot(ds, 'z', ('connect1', 'diffused'))
+   sl.annotate_mesh_lines(thresh=0.1)
+   sl.zoom(0.75)
+   sl.save()
+
+This annotation is performed by marking the pixels where the mapped coordinate is close
+to the element boundary. What counts as 'close' (in the mapped coordinate system) is 
+determined by the ``thresh`` parameter, which can be varied to make the lines thicker or
+thinner.
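+
+For example, passing a smaller value of ``thresh`` should produce thinner mesh lines
+(the value below is purely illustrative):
+
+.. python-script::
+
+   import yt
+   ds = yt.load('MOOSE_sample_data/out.e-s010')
+   sl = yt.SlicePlot(ds, 'z', ('connect1', 'diffused'))
+   sl.annotate_mesh_lines(thresh=0.01)  # smaller thresh -> thinner lines
+   sl.zoom(0.75)
+   sl.save()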
+
+Finally, slices can also be used to examine 2D unstructured mesh datasets, but the
+slices must be normal to the ``'z'`` axis, or you'll get an error. Here is
+an example using another MOOSE dataset:
+
+.. python-script::
+
+   import yt
+   ds = yt.load('MOOSE_sample_data/out.e')
+   sl = yt.SlicePlot(ds, 2, ('connect1', 'nodal_aux'))
+   sl.save()
+
+
 Plot Customization: Recentering, Resizing, Colormaps, and More
 --------------------------------------------------------------
 

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -3,19 +3,16 @@
 Unstructured Mesh Rendering
 ===========================
 
+Installation
+^^^^^^^^^^^^
+
 Beginning with version 3.3, yt has the ability to volume render unstructured
-meshes from, for example, finite element calculations. In order to use this
-capability, a few additional dependencies are required beyond those you get
-when you run the install script. First, `embree <https://embree.github.io>`_
+mesh data, such as that created by finite element calculations.
+In order to use this capability, a few additional dependencies are required 
+beyond those you get when you run the install script. First, `embree <https://embree.github.io>`_
 (a fast software ray-tracing library from Intel) must be installed, either
 by compiling from source or by using one of the pre-built binaries available
-at Embree's `downloads <https://embree.github.io/downloads.html>`_ page. Once
-Embree is installed, you must also create a symlink next to the library. For
-example, if the libraries were installed at /usr/local/lib/, you must do
-
-.. code-block:: bash
-
-    sudo ln -s /usr/local/lib/libembree.2.6.1.dylib /usr/local/lib/libembree.so
+at Embree's `downloads <https://embree.github.io/downloads.html>`_ page. 
 
 Second, the python bindings for embree (called 
 `pyembree <https://github.com/scopatz/pyembree>`_) must also be installed. To
@@ -25,23 +22,39 @@
 
     git clone https://github.com/scopatz/pyembree
 
-To install, navigate to the root directory and run the setup script:
+To install, navigate to the root directory and run the setup script.
+If Embree was installed to some location that is not in your path by default,
+you will need to pass in CFLAGS and LDFLAGS to the setup.py script. For example,
+the Mac OS X package installer puts the installation at /opt/local/ instead of 
+/usr/local. To account for this, you would do:
 
 .. code-block:: bash
 
-    python setup.py develop
+    CFLAGS='-I/opt/local/include' LDFLAGS='-L/opt/local/lib' python setup.py install
 
-If Embree was installed to some location that is not in your path by default,
-you will need to pass in CFLAGS and LDFLAGS to the setup.py script. For example,
-the Mac OS package installer puts the installation at /opt/local/ instead of 
-usr/local. To account for this, you would do:
+Once embree and pyembree are installed, you must rebuild yt from source in order to use
+the unstructured mesh rendering capability. Once again, if embree is installed in a 
+location that is not part of your default search path, you must tell yt where to find it.
+There are a number of ways to do this. One way is to again manually pass in the flags
+when running the setup script in the yt-hg directory:
 
 .. code-block:: bash
 
     CFLAGS='-I/opt/local/include' LDFLAGS='-L/opt/local/lib' python setup.py develop
 
-You must also use these flags when building any part of yt that links against
-pyembree.
+You can also set the EMBREE_DIR environment variable to '/opt/local', in which case
+you could simply run
+
+.. code-block:: bash
+   
+   python setup.py develop
+
+as usual. Finally, if you create a file called embree.cfg in the yt-hg directory
+containing the location of the embree installation (e.g. a single line reading
+/opt/local), the setup script will find it and use it, provided EMBREE_DIR is not set.
+We recommend one of the latter two methods, especially if you plan on re-compiling the
+cython extensions regularly. Note that none of this is necessary if you installed
+embree into a location that is in your default path, such as /usr/local.
 
 Once the pre-requisites are installed, unstructured mesh data can be rendered
 much like any other dataset. In particular, a new type of 
@@ -55,120 +68,293 @@
 :class:`~yt.visualization.volume_rendering.render_source.RenderSource` is called,
 a set of rays are cast at the source. Each time a ray strikes the source mesh,
 the data is sampled at the intersection point and the resulting value gets 
-saved into an image.
+saved into an image. See below for examples.
 
-See below for examples. First, here is an example of rendering a hexahedral mesh.
+Examples
+^^^^^^^^
+
+First, here is an example of rendering an 8-node hexahedral MOOSE dataset.
 
 .. python-script::
 
    import yt
-   import pylab as plt
-   from yt.visualization.volume_rendering.render_source import MeshSource
-   from yt.visualization.volume_rendering.camera import Camera
-   from yt.utilities.exodusII_reader import get_data
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
 
-   # load the data
-   coords, connectivity, data = get_data("MOOSE_sample_data/out.e-s010")
-   mesh_id = 0
-   field_name = ('gas', 'diffused')
-   ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
 
-   # create the RenderSource
-   ms = MeshSource(ds, field_name)
+   ms = MeshSource(ds, ('connect1', 'diffused'))
 
-   # set up camera
+   # setup the camera
    cam = Camera(ds)
-   camera_position = ds.arr([-3.0, 3.0, -3.0], 'code_length')
-   north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
-   cam.resolution = (800, 800)
-   cam.set_position(camera_position, north_vector)
+   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
 
-   # make the image
-   im = ms.render(cam)
+   cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')  # the camera location
+   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
+   cam.set_position(cam_pos, north_vector)
 
-   # plot and save
-   plt.imshow(im, cmap='Eos A', origin='lower', vmin=0, vmax=2.0)
-   plt.gca().axes.get_xaxis().set_visible(False)
-   plt.gca().axes.get_yaxis().set_visible(False)
-   cb = plt.colorbar()
-   cb.set_label(field_name[1])
-   plt.savefig('hex_mesh_render.png')
+   im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
+   pw.write_png(im, 'hex_mesh_render.png')
 
-Next, here is an example of rendering a dataset with tetrahedral mesh elements.
+You can also overplot the mesh boundaries:
 
 .. python-script::
 
    import yt
-   import pylab as plt
-   from yt.visualization.volume_rendering.render_source import MeshSource
-   from yt.visualization.volume_rendering.camera import Camera
-   from yt.utilities.exodusII_reader import get_data
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
 
-   # load the data
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
+
+   ms = MeshSource(ds, ('connect1', 'diffused'))
+
+   # setup the camera
+   cam = Camera(ds)
+   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
+
+   cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')  # the camera location
+   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
+   cam.set_position(cam_pos, north_vector)
+   cam.resolution = (800, 800)
+
+   ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
+   im = ms.annotate_mesh_lines()
+   pw.write_png(im, 'hex_render_with_mesh.png')
+
+As with slices, you can visualize different meshes and different fields. For example,
+here is a script similar to the above that plots the "diffused" variable 
+using the mesh labelled by "connect2":
+
+.. python-script::
+
+   import yt
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
+
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
+
+   ms = MeshSource(ds, ('connect2', 'diffused'))
+
+   # setup the camera
+   cam = Camera(ds)
+   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
+
+   cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')  # the camera location
+   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
+   cam.set_position(cam_pos, north_vector)
+
+   im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
+   pw.write_png(im, 'hex_mesh_render.png')
+
+Next, here is an example of rendering a dataset with tetrahedral mesh elements.
+Note that in this dataset, there are multiple "steps" per file, so we specify
+that we want to look at the last one.
+
+.. python-script::
+
+   import yt
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
+
    filename = "MOOSE_sample_data/high_order_elems_tet4_refine_out.e"
-   coords, connectivity, data = get_data(filename)
-   mesh_id = 0
-   field_name = ('gas', 'u')
-   ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+   ds = yt.load(filename, step=-1)  # we look at the last time frame
 
-   # create the RenderSource
-   ms = MeshSource(ds, field_name)
+   ms = MeshSource(ds, ('connect1', 'u'))
 
-   # set up camera
+   # setup the camera 
    cam = Camera(ds)
    camera_position = ds.arr([3.0, 3.0, 3.0], 'code_length')
    cam.set_width(ds.arr([2.0, 2.0, 2.0], 'code_length'))
    north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
+   cam.set_position(camera_position, north_vector)
+
+   im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 1.0))
+   pw.write_png(im, 'tetra_render.png')
+
+Another example, this time plotting the temperature field from a 20-node hex 
+MOOSE dataset:
+
+.. python-script::
+
+   import yt
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
+
+   ds = yt.load("MOOSE_sample_data/mps_out.e", step=-1)  # we load the last time frame
+
+   ms = MeshSource(ds, ('connect2', 'temp'))
+
+   # set up the camera
+   cam = Camera(ds)
+   camera_position = ds.arr([-1.0, 1.0, -0.5], 'code_length')
+   north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+   cam.width = ds.arr([0.04, 0.04, 0.04], 'code_length')
    cam.resolution = (800, 800)
    cam.set_position(camera_position, north_vector)
 
-   # make the image
-   im = ms.render(cam)
+   im = ms.render(cam, cmap='hot', color_bounds=(500.0, 1700.0))
+   im = ms.annotate_mesh_lines()
+   pw.write_png(im, 'hex20_render.png')
 
-   # plot and save
-   plt.imshow(im, cmap='Eos A', origin='lower', vmin=0.0, vmax=1.0)
-   plt.gca().axes.get_xaxis().set_visible(False)
-   plt.gca().axes.get_yaxis().set_visible(False)
-   cb = plt.colorbar()
-   cb.set_label(field_name[1])
-   plt.savefig('tet_mesh_render.png')
+As with other volume renderings in yt, you can swap out different lenses. Here is 
+an example that uses a "perspective" lens, for which the rays diverge from the 
+camera position according to some opening angle:
 
-Finally, here is a script that creates frames of a movie. It calls the rotate()
-method 300 times, saving a new image to the disk each time.
+.. python-script::
+
+   import yt
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
+
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
+
+   ms = MeshSource(ds, ('connect2', 'diffused'))
+
+   # setup the camera
+   cam = Camera(ds, lens_type='perspective')
+   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
+
+   cam_pos = ds.arr([-4.5, 4.5, -4.5], 'code_length')  # the camera location
+   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
+   cam.set_position(cam_pos, north_vector)
+
+   im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
+   im = ms.annotate_mesh_lines()
+   pw.write_png(im, 'hex_mesh_render_perspective.png')
+
+You can also create scenes that have multiple meshes. The ray-tracing infrastructure
+will keep track of the depth information for each source separately, and composite
+the final image accordingly. In the next example, we show how to render a scene 
+containing two meshes:
+
+.. python-script::
+
+    import yt
+    from yt.visualization.volume_rendering.api import MeshSource, Camera, Scene
+    import yt.utilities.png_writer as pw
+
+    ds = yt.load("MOOSE_sample_data/out.e-s010")
+
+    # this time we create an empty scene and add sources to it one-by-one
+    sc = Scene()
+
+    cam = Camera(ds)
+    cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    cam.set_position(ds.arr([-3.0, 3.0, -3.0], 'code_length'),
+                     ds.arr([0.0, 1.0, 0.0], 'dimensionless'))
+    cam.set_width(ds.arr([8.0, 8.0, 8.0], 'code_length'))
+    cam.resolution = (800, 800)
+
+    sc.camera = cam
+
+    # create two distinct MeshSources from 'connect1' and 'connect2'
+    ms1 = MeshSource(ds, ('connect1', 'diffused'))
+    ms2 = MeshSource(ds, ('connect2', 'diffused'))
+
+    sc.add_source(ms1)
+    sc.add_source(ms2)
+
+    im = sc.render()
+
+    pw.write_png(im, 'composite_render.png')
+
+
+Making Movies
+^^^^^^^^^^^^^
+
+Here are a couple of example scripts that show how to create image frames that
+can later be stitched together into a movie. In the first example, we look at a
+single dataset at a fixed time, but we move the camera around to get a different
+vantage point. We call the rotate() method 301 times, saving a new image to
+disk each time.
 
 .. code-block:: python
 
    import yt
-   import pylab as plt
-   from yt.visualization.volume_rendering.render_source import MeshSource
-   from yt.visualization.volume_rendering.camera import Camera
-   from yt.utilities.exodusII_reader import get_data
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
+   import numpy as np
 
-   # load dataset
-   coords, connectivity, data = get_data("MOOSE_sample_data/out.e-s010")
-   mesh_id = 0
-   field_name = ('gas', 'diffused')
-   ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
 
-   # create the RenderSource
-   ms = MeshSource(ds, field_name)
+   ms = MeshSource(ds, ('connect1', 'diffused'))
 
-   # set up camera
+   # setup the camera
    cam = Camera(ds)
-   camera_position = ds.arr([-3.0, 3.0, -3.0], 'code_length')
-   north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
-   cam.set_position(camera_position, north_vector)
+   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
+
+   cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')  # the camera location
+   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
+   cam.set_position(cam_pos, north_vector)
+   cam.resolution = (800, 800)
    cam.steady_north = True
 
    # make movie frames
    num_frames = 301
    for i in range(num_frames):
        cam.rotate(2.0*np.pi/num_frames)
-       im = ms.render(cam)
-       plt.imshow(im, cmap='Eos A', origin='lower',vmin=0.0, vmax=2.0)
-       plt.gca().axes.get_xaxis().set_visible(False)
-       plt.gca().axes.get_yaxis().set_visible(False)
-       cb = plt.colorbar()
-       cb.set_label('diffused')
-       plt.savefig('movie_frames/surface_render_%.4d.png' % i)
-       plt.clf()
+       im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
+       pw.write_png(im, 'movie_frames/surface_render_%.4d.png' % i)
+
+Finally, this example demonstrates how to loop over the time steps in a single
+file with a fixed camera position:
+
+.. code-block:: python
+
+    import yt
+    from yt.visualization.volume_rendering.api import MeshSource, Camera
+    import pylab as plt
+
+    NUM_STEPS = 127
+    CMAP = 'hot'
+    VMIN = 300.0
+    VMAX = 2000.0
+
+    for step in range(NUM_STEPS):
+
+        ds = yt.load("MOOSE_sample_data/mps_out.e", step=step)
+
+        time = ds._get_current_time()
+
+        # the field name is a tuple of strings. The first string
+        # specifies which mesh will be plotted, the second string
+        # specifies the name of the field.
+        field_name = ('connect2', 'temp')
+
+        # this initializes the render source
+        ms = MeshSource(ds, field_name)
+
+        # set up the camera here. these values were arrived at by
+        # calling pitch, yaw, and roll in the notebook until I
+        # got the angle I wanted.
+        cam = Camera(ds)
+        camera_position = ds.arr([0.1, 0.0, 0.1], 'code_length')
+        cam.focus = ds.domain_center
+        north_vector = ds.arr([0.3032476, 0.71782557, -0.62671153], 'dimensionless')
+        cam.width = ds.arr([0.04, 0.04, 0.04], 'code_length')
+        cam.resolution = (800, 800)
+        cam.set_position(camera_position, north_vector)
+
+        # actually make the image here
+        im = ms.render(cam, cmap=CMAP, color_bounds=(VMIN, VMAX))
+
+        # Plot the result using matplotlib and save.
+        # Note that we are setting the upper and lower
+        # bounds of the colorbar to be the same for all
+        # frames of the image.
+
+        # must clear the image between frames
+        plt.clf()
+        fig = plt.gcf()
+        ax = plt.gca()
+        ax.imshow(im, interpolation='nearest', origin='lower')
+
+        # Add the colorbar using a fake (not shown) image.
+        p = ax.imshow(ms.data, visible=False, cmap=CMAP, vmin=VMIN, vmax=VMAX)
+        cb = fig.colorbar(p)
+        cb.set_label(field_name[1])
+
+        ax.text(25, 750, 'time = %.2e' % time, color='k')
+        ax.axes.get_xaxis().set_visible(False)
+        ax.axes.get_yaxis().set_visible(False)
+
+        plt.savefig('movie_frames/test_%.3d' % step)

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c requirements.txt
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,3 +4,4 @@
 h5py==2.5.0 
 nose==1.3.6 
 sympy==0.7.6 
+

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1344,7 +1344,8 @@
 
     def _retrieve_halos(self):
         # First get the halo particulars.
-        lines = file("%s.out" % self.basename)
+        with open("%s.out" % self.basename, 'r') as fh:
+            lines = fh.readlines()
         # The location of particle data for each halo.
         locations = self._collect_halo_data_locations()
         halo = 0
@@ -1395,7 +1396,8 @@
 
     def _collect_halo_data_locations(self):
         # The halos are listed in order in the file.
-        lines = file("%s.txt" % self.basename)
+        with open("%s.txt" % self.basename, 'r') as fh:
+            lines = fh.readlines()
         locations = []
         realpath = path.realpath("%s.txt" % self.basename)
         for line in lines:
@@ -1408,7 +1410,6 @@
                 item = item.split("/")
                 temp.append(path.join(path.dirname(realpath), item[-1]))
             locations.append(temp)
-        lines.close()
         return locations
 
 class TextHaloList(HaloList):
@@ -1422,7 +1423,8 @@
 
     def _retrieve_halos(self, fname, columns, comment):
         # First get the halo particulars.
-        lines = file(fname)
+        with open(fname, 'r') as fh:
+            lines = fh.readlines()
         halo = 0
         base_set = ['x', 'y', 'z', 'r']
         keys = columns.keys()

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -17,9 +17,11 @@
 import numpy as np
 from functools import wraps
 import fileinput
+import io
 from re import finditer
 from tempfile import TemporaryFile
 import os
+import sys
 import zipfile
 
 from yt.config import ytcfg
@@ -592,20 +594,14 @@
         return tuple(self.ActiveDimensions.tolist())
 
     def _setup_data_source(self):
-        LE = self.left_edge - self.base_dds
-        RE = self.right_edge + self.base_dds
-        if not all(self.ds.periodicity):
-            for i in range(3):
-                if self.ds.periodicity[i]: continue
-                LE[i] = max(LE[i], self.ds.domain_left_edge[i])
-                RE[i] = min(RE[i], self.ds.domain_right_edge[i])
-        self._data_source = self.ds.region(self.center, LE, RE)
+        self._data_source = self.ds.region(self.center,
+            self.left_edge, self.right_edge)
         self._data_source.min_level = 0
         self._data_source.max_level = self.level
-        self._pdata_source = self.ds.region(self.center,
-            self.left_edge, self.right_edge)
-        self._pdata_source.min_level = 0
-        self._pdata_source.max_level = self.level
+        # This triggers "special" behavior in the RegionSelector to ensure we
+        # select *cells* whose bounding boxes overlap with our region, not just
+        # their cell centers.
+        self._data_source.loose_selection = True
 
     def get_data(self, fields = None):
         if fields is None: return
@@ -644,7 +640,7 @@
 
     def _fill_particles(self, part):
         for p in part:
-            self[p] = self._pdata_source[p]
+            self[p] = self._data_source[p]
 
     def _fill_fields(self, fields):
         fields = [f for f in fields if f not in self.field_data]
@@ -1278,14 +1274,13 @@
     def _color_samples_obj(self, cs, em, color_log, emit_log, color_map, arr,
                            color_field_max, color_field_min, color_field,
                            emit_field_max, emit_field_min, emit_field): # this now holds for obj files
-        from sys import version
         if color_field is not None:
             if color_log: cs = np.log10(cs)
         if emit_field is not None:
             if emit_log: em = np.log10(em)
         if color_field is not None:
             if color_field_min is None:
-                if version >= '3':
+                if sys.version_info > (3, ):
                     cs = [float(field) for field in cs]
                     cs = np.array(cs)
                 mi = cs.min()
@@ -1293,7 +1288,7 @@
                 mi = color_field_min
                 if color_log: mi = np.log10(mi)
             if color_field_max is None:
-                if version >= '3':
+                if sys.version_info > (3, ):
                     cs = [float(field) for field in cs]
                     cs = np.array(cs)
                 ma = cs.max()
@@ -1311,7 +1306,7 @@
         # now, get emission
         if emit_field is not None:
             if emit_field_min is None:
-                if version >= '3':
+                if sys.version_info > (3, ):
                     em = [float(field) for field in em]
                     em = np.array(em)
                 emi = em.min()
@@ -1319,7 +1314,7 @@
                 emi = emit_field_min
                 if emit_log: emi = np.log10(emi)
             if emit_field_max is None:
-                if version >= '3':
+                if sys.version_info > (3, ):
                     em = [float(field) for field in em]
                     em = np.array(em)
                 ema = em.max()
@@ -1339,15 +1334,9 @@
                     color_log = True, emit_log = True, plot_index = None,
                     color_field_max = None, color_field_min = None,
                     emit_field_max = None, emit_field_min = None):
-        from sys import version
-        from io import IOBase
         if plot_index is None:
             plot_index = 0
-        if version < '3':
-            checker = file
-        else:
-            checker = IOBase
-        if isinstance(filename, checker):
+        if isinstance(filename, io.IOBase):
             fobj = filename + '.obj'
             fmtl = filename + '.mtl'
         else:
@@ -1639,7 +1628,7 @@
     @parallel_root_only
     def _export_ply(self, filename, bounds = None, color_field = None,
                    color_map = "algae", color_log = True, sample_type = "face"):
-        if isinstance(filename, file):
+        if isinstance(filename, io.IOBase):
             f = filename
         else:
             f = open(filename, "wb")

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -588,7 +588,7 @@
                         extra_attrs=extra_attrs)
 
         return filename
-        
+
     def to_glue(self, fields, label="yt", data_collection=None):
         """
         Takes specific *fields* in the container and exports them to

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/data_objects/region_expression.py
--- a/yt/data_objects/region_expression.py
+++ b/yt/data_objects/region_expression.py
@@ -12,8 +12,8 @@
 #-----------------------------------------------------------------------------
 
 import weakref
-import types
 
+from yt.extern.six import string_types
 from yt.utilities.exceptions import YTDimensionalityError
 
 class RegionExpression(object):
@@ -31,11 +31,11 @@
         # At first, we will only implement this as accepting a slice that is
         # (optionally) unitful corresponding to a specific set of coordinates
         # that result in a rectangular prism or a slice.
-        if isinstance(item, types.StringTypes):
+        if isinstance(item, string_types):
             # This is some field; we will instead pass this back to the
             # all_data object.
             return self.all_data[item]
-        if isinstance(item, tuple) and isinstance(item[1], types.StringTypes):
+        if isinstance(item, tuple) and isinstance(item[1], string_types):
             return self.all_data[item]
         if len(item) != self.ds.dimensionality:
             # Not the right specification, and we don't want to do anything

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -1141,5 +1141,5 @@
     def _calculate_offsets(self, fields):
         pass
 
-    def __cmp__(self, other):
-        return cmp(self.filename, other.filename)
+    def __lt__(self, other):
+        return self.filename < other.filename

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -59,6 +59,9 @@
         self._current_fluid_type = self.ds.default_fluid_type
 
     def _check_consistency(self):
+        if self.connectivity_indices.shape[1] != self._connectivity_length:
+            raise RuntimeError
+
         for gi in range(self.connectivity_indices.shape[0]):
             ind = self.connectivity_indices[gi, :] - self._index_offset
             coords = self.connectivity_coords[ind, :]
@@ -136,7 +139,7 @@
         mask = self._get_selector_mask(selector)
         count = self.count(selector)
         if count == 0: return 0
-        dest[offset:offset+count] = source[mask,...]
+        dest[offset:offset+count] = source[mask, ...]
         return count
 
     def count(self, selector):
@@ -167,11 +170,12 @@
 
     def select_fcoords_vertex(self, dobj = None):
         mask = self._get_selector_mask(dobj.selector)
-        if mask is None: return np.empty((0,self._connectivity_length,3), dtype='float64')
+        if mask is None: return np.empty((0, self._connectivity_length, 3), dtype='float64')
         vertices = self.connectivity_coords[
                 self.connectivity_indices - 1]
         return vertices[mask, :, :]
 
+
 class SemiStructuredMesh(UnstructuredMesh):
     _connectivity_length = 8
     _type_name = 'semi_structured_mesh'

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -14,7 +14,7 @@
 import contextlib
 import inspect
 
-from yt.extern.six import string_types
+from yt.extern.six import string_types, PY2
 from yt.funcs import \
     ensure_list
 from .field_exceptions import \
@@ -215,9 +215,14 @@
         return data_label
 
     def __repr__(self):
+        if PY2:
+            func_name = self._function.func_name
+        else:
+            func_name = self._function.__name__
+
         if self._function == NullFunc:
             s = "On-Disk Field "
-        elif self._function.func_name == "_TranslationFunc":
+        elif func_name == "_TranslationFunc":
             s = "Alias Field for \"%s\" " % (self._function.alias_name,)
         else:
             s = "Derived Field "

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -24,6 +24,7 @@
     'chombo',
     'eagle',
     'enzo',
+    'exodus_ii',
     'fits',
     'flash',
     'gadget',

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -152,10 +152,7 @@
         self.dataset_type = dataset_type
         # for now, the index file is the dataset!
         self.index_filename = os.path.join(os.getcwd(), self.dataset.filename)
-        if PY2:
-            self._fhandle = file(self.index_filename,'rb')
-        else:
-            self._fhandle = open(self.index_filename,'rb')
+        self._fhandle = open(self.index_filename,'rb')
         GridIndex.__init__(self, ds, dataset_type)
 
         self._fhandle.close()

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/exodus_ii/__init__.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/__init__.py
@@ -0,0 +1,14 @@
+"""
+API for yt.frontends.exodus_ii
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/exodus_ii/api.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/api.py
@@ -0,0 +1,27 @@
+"""
+API for yt.frontends.exodus_ii
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+    ExodusIIUnstructuredMesh, \
+    ExodusIIUnstructuredIndex, \
+    ExodusIIDataset
+
+from .fields import \
+    ExodusIIFieldInfo
+
+from .io import \
+    IOHandlerExodusII
+
+from . import tests

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/exodus_ii/data_structures.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -0,0 +1,262 @@
+"""
+Exodus II data structures
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+import numpy as np
+
+from yt.geometry.unstructured_mesh_handler import \
+    UnstructuredIndex
+from yt.data_objects.unstructured_mesh import \
+    UnstructuredMesh
+from yt.data_objects.static_output import \
+    Dataset
+from .io import \
+    NetCDF4FileHandler
+from yt.utilities.logger import ytLogger as mylog
+from .fields import \
+    ExodusIIFieldInfo
+from .util import \
+    load_info_records, sanitize_string
+
+
+class ExodusIIUnstructuredMesh(UnstructuredMesh):
+    _index_offset = 1
+
+    def __init__(self, *args, **kwargs):
+        super(ExodusIIUnstructuredMesh, self).__init__(*args, **kwargs)
+
+
+class ExodusIIUnstructuredIndex(UnstructuredIndex):
+    def __init__(self, ds, dataset_type = 'exodus_ii'):
+        super(ExodusIIUnstructuredIndex, self).__init__(ds, dataset_type)
+
+    def _initialize_mesh(self):
+        coords = self.ds._read_coordinates()
+        self.meshes = [ExodusIIUnstructuredMesh(
+            mesh_id, self.index_filename, conn_ind, coords, self)
+                       for mesh_id, conn_ind in
+                       enumerate(self.ds._read_connectivity())]
+
+    def _detect_output_fields(self):
+        elem_names = self.dataset.parameters['elem_names']
+        node_names = self.dataset.parameters['nod_names']
+        fnames = elem_names + node_names
+        self.field_list = []
+        for i in range(1, len(self.meshes)+1):
+            self.field_list += [('connect%d' % i, fname) for fname in fnames]
+
+
+class ExodusIIDataset(Dataset):
+    _index_class = ExodusIIUnstructuredIndex
+    _field_info_class = ExodusIIFieldInfo
+
+    def __init__(self,
+                 filename,
+                 step=0,
+                 dataset_type='exodus_ii',
+                 storage_filename=None,
+                 units_override=None):
+
+        self.parameter_filename = filename
+        self.fluid_types += self._get_fluid_types()
+        self.step = step
+        super(ExodusIIDataset, self).__init__(filename, dataset_type,
+                                              units_override=units_override)
+        self.index_filename = filename
+        self.storage_filename = storage_filename
+
+    def _set_code_unit_attributes(self):
+        # This is where quantities are created that represent the various
+        # on-disk units.  These are the currently available quantities which
+        # should be set, along with examples of how to set them to standard
+        # values.
+        #
+        self.length_unit = self.quan(1.0, "cm")
+        self.mass_unit = self.quan(1.0, "g")
+        self.time_unit = self.quan(1.0, "s")
+        #
+        # These can also be set:
+        # self.velocity_unit = self.quan(1.0, "cm/s")
+        # self.magnetic_unit = self.quan(1.0, "gauss")
+
+    def _parse_parameter_file(self):
+        self._handle = NetCDF4FileHandler(self.parameter_filename)
+        self._vars = self._handle.dataset.variables
+        self._read_glo_var()
+        self.dimensionality = self._vars['coor_names'].shape[0]
+        self.parameters['info_records'] = self._load_info_records()
+        self.unique_identifier = self._get_unique_identifier()
+        self.num_steps = len(self._vars['time_whole'])
+        self.current_time = self._get_current_time()
+        self.parameters['num_meshes'] = self._vars['eb_status'].shape[0]
+        self.parameters['elem_names'] = self._get_elem_names()
+        self.parameters['nod_names'] = self._get_nod_names()
+        self.domain_left_edge = self._load_domain_edge(0)
+        self.domain_right_edge = self._load_domain_edge(1)
+
+        # set up pseudo-3D for low-dimensional datasets here
+        if self.dimensionality == 2:
+            self.domain_left_edge = np.append(self.domain_left_edge, 0.0)
+            self.domain_right_edge = np.append(self.domain_right_edge, 1.0)
+
+        self.periodicity = (False, False, False)
+
+        # These attributes don't really make sense for unstructured
+        # mesh data, but yt warns if they are not present, so we set
+        # them to dummy values here.
+        self.domain_dimensions = np.ones(3, "int32")
+        self.cosmological_simulation = 0
+        self.current_redshift = 0
+        self.omega_lambda = 0
+        self.omega_matter = 0
+        self.hubble_constant = 0
+        self.refine_by = 0
+
+    def _get_fluid_types(self):
+        handle = NetCDF4FileHandler(self.parameter_filename).dataset
+        fluid_types = ()
+        i = 1
+        while True:
+            ftype = 'connect%d' % i
+            if ftype in handle.variables:
+                fluid_types += (ftype,)
+                i += 1
+            else:
+                break
+        return fluid_types
+
+    def _read_glo_var(self):
+        """
+        Adds each global variable to the dict of parameters
+
+        """
+        names = self._get_glo_names()
+        if not names:
+            return
+        values = self._vars['vals_glo_var'][:].transpose()
+        for name, value in zip(names, values):
+            self.parameters[name] = value
+
+    def _load_info_records(self):
+        """
+        Returns parsed version of the info_records.
+        """
+        try:
+            return load_info_records(self._vars['info_records'])
+        except (KeyError, TypeError):
+            mylog.warning("No info_records found")
+            return []
+
+    def _get_unique_identifier(self):
+        return self.parameter_filename.__hash__()
+
+    def _get_current_time(self):
+        try:
+            return self._vars['time_whole'][self.step]
+        except IndexError:
+            raise RuntimeError("Invalid step number, max is %d"
+                               % (self.num_steps - 1))
+        except (KeyError, TypeError):
+            return 0.0
+
+    def _get_glo_names(self):
+        """
+
+        Returns the names of the global vars, if available.
+
+        """
+
+        if "name_glo_var" not in self._vars:
+            mylog.warning("name_glo_var not found")
+            return []
+        else:
+            return [sanitize_string(v.tostring()) for v in
+                    self._vars["name_glo_var"]]
+            
+    def _get_elem_names(self):
+        """
+
+        Returns the names of the element vars, if available.
+
+        """
+
+        if "name_elem_var" not in self._vars:
+            mylog.warning("name_elem_var not found")
+            return []
+        else:
+            return [sanitize_string(v.tostring()) for v in
+                    self._vars["name_elem_var"]]
+
+    def _get_nod_names(self):
+        """
+
+        Returns the names of the node vars, if available
+
+        """
+
+        if "name_nod_var" not in self._vars:
+            mylog.warning("name_nod_var not found")
+            return []
+        else:
+            return [sanitize_string(v.tostring()) for v in
+                    self._vars["name_nod_var"]]
+
+    def _read_coordinates(self):
+        """
+
+        Loads the coordinates for the mesh
+
+        """
+        
+        coord_axes = 'xyz'[:self.dimensionality]
+
+        mylog.info("Loading coordinates")
+        if "coord" not in self._vars:
+            return np.array([self._vars["coord%s" % ax][:]
+                             for ax in coord_axes]).transpose().copy()
+        else:
+            return np.array([coord for coord in
+                             self._vars["coord"][:]]).transpose().copy()
+
+    def _read_connectivity(self):
+        """
+        Loads the connectivity data for the mesh
+        """
+        mylog.info("Loading connectivity")
+        connectivity = []
+        for i in range(self.parameters['num_meshes']):
+            connectivity.append(self._vars["connect%d" % (i+1)][:].astype("i8"))
+        return connectivity
+
+    def _load_domain_edge(self, domain_idx):
+        """
+        Loads the boundaries for the domain edge
+
+        Parameters:
+        - domain_idx: 0 corresponds to the left edge, 1 corresponds to the right edge
+        """
+        if domain_idx == 0:
+            return self._read_coordinates().min(axis=0)
+        if domain_idx == 1:
+            return self._read_coordinates().max(axis=0)
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            from netCDF4 import Dataset
+            filename = args[0]
+            with Dataset(filename) as f:
+                f.variables['connect1']
+            return True
+        except Exception:
+            return False

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/exodus_ii/definitions.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/definitions.py
@@ -0,0 +1,1 @@
+# This file is often empty.  It can hold definitions related to a frontend.

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/exodus_ii/fields.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/fields.py
@@ -0,0 +1,45 @@
+"""
+ExodusII-specific fields
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.fields.field_info_container import \
+    FieldInfoContainer
+
+# We need to specify which fields we might have in our dataset.  The field info
+# container subclass here will define which fields it knows about.  There are
+# optionally methods on it that get called which can be subclassed.
+
+class ExodusIIFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+        # Each entry here is of the form
+        # ( "name", ("units", ["fields", "to", "alias"], # "display_name")),
+    )
+
+    known_particle_fields = (
+        # Identical form to above
+        # ( "name", ("units", ["fields", "to", "alias"], # "display_name")),
+    )
+
+    def __init__(self, ds, field_list):
+        super(ExodusIIFieldInfo, self).__init__(ds, field_list)
+        # If you want, you can check self.field_list
+
+    def setup_fluid_fields(self):
+        # Here we do anything that might need info about the dataset.
+        # You can use self.alias, self.add_output_field and self.add_field .
+        pass
+
+    def setup_particle_fields(self, ptype):
+        # This will get called for every particle type.
+        pass

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/exodus_ii/io.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/io.py
@@ -0,0 +1,84 @@
+"""
+ExodusII-specific IO functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.utilities.io_handler import \
+    BaseIOHandler
+from yt.utilities.file_handler import \
+    NetCDF4FileHandler
+
+
+class IOHandlerExodusII(BaseIOHandler):
+    _particle_reader = False
+    _dataset_type = "exodus_ii"
+    _INDEX_OFFSET = 1
+
+    def __init__(self, ds):
+        self.filename = ds.index_filename
+        exodus_ii_handler = NetCDF4FileHandler(self.filename)
+        self.handler = exodus_ii_handler.dataset
+        super(IOHandlerExodusII, self).__init__(ds)
+        self.node_fields = ds._get_nod_names()
+        self.elem_fields = ds._get_elem_names()
+
+    def _read_particle_coords(self, chunks, ptf):
+        pass
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        pass
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        # This needs to allocate a set of arrays inside a dictionary, where the
+        # keys are the (ftype, fname) tuples and the values are arrays that
+        # have been masked using whatever selector method is appropriate.  The
+        # dict gets returned at the end and it should be flat, with selected
+        # data.  Note that if you're reading grid data, you might need to
+        # special-case a grid selector object.
+        chunks = list(chunks)
+        rv = {}
+        for field in fields:
+            ftype, fname = field
+            ci = self.handler.variables[ftype][:] - self._INDEX_OFFSET
+            num_elem = ci.shape[0]
+            if fname in self.node_fields:
+                nodes_per_element = ci.shape[1]
+                rv[field] = np.empty((num_elem, nodes_per_element), dtype="float64")
+            elif fname in self.elem_fields:
+                rv[field] = np.empty(num_elem, dtype="float64")
+        for field in fields:
+            ind = 0
+            ftype, fname = field
+            mesh_id = int(ftype[-1])
+            chunk = chunks[mesh_id - 1]
+            ci = self.handler.variables[ftype][:] - self._INDEX_OFFSET
+            if fname in self.node_fields:
+                field_ind = self.node_fields.index(fname)
+                fdata = self.handler.variables['vals_nod_var%d' % (field_ind + 1)]
+                data = fdata[self.ds.step][ci]
+                for g in chunk.objs:
+                    ind += g.select(selector, data, rv[field], ind)  # caches
+            if fname in self.elem_fields:
+                field_ind = self.elem_fields.index(fname)
+                fdata = self.handler.variables['vals_elem_var%deb%s' %
+                                               (field_ind + 1, mesh_id)][:]
+                data = fdata[self.ds.step, :]
+                for g in chunk.objs:
+                    ind += g.select(selector, data, rv[field], ind)  # caches
+        return rv
+
+    def _read_chunk_data(self, chunk, fields):
+        # This reads the data from a single chunk, and is only used for
+        # caching.
+        pass

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/exodus_ii/setup.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/setup.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('exodus_ii', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/exodus_ii/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/tests/test_outputs.py
@@ -0,0 +1,52 @@
+"""
+Exodus II frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import \
+    assert_equal, \
+    assert_array_equal, \
+    requires_file
+from yt.utilities.answer_testing.framework import \
+    data_dir_load
+
+out = "ExodusII/out.e"
+
+
+@requires_file(out)
+def test_out():
+    ds = data_dir_load(out)
+    yield assert_equal, str(ds), "out.e"
+    yield assert_equal, ds.dimensionality, 3
+    yield assert_equal, ds.unique_identifier, 5081193338833632556
+    yield assert_equal, ds.current_time, 0.0
+    yield assert_array_equal, ds.parameters['nod_names'], ['convected', 'diffused']
+    yield assert_equal, ds.parameters['num_meshes'], 2
+
+out_s002 = "ExodusII/out.e-s002"
+
+
+@requires_file(out_s002)
+def test_out002():
+    ds = data_dir_load(out_s002)
+    yield assert_equal, str(ds), "out.e-s002"
+    yield assert_equal, ds.dimensionality, 3
+    yield assert_equal, ds.current_time, 2.0
+
+gold = "ExodusII/gold.e"
+
+
+@requires_file(gold)
+def test_gold():
+    ds = data_dir_load(gold)
+    yield assert_equal, str(ds), "gold.e"

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/exodus_ii/util.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/util.py
@@ -0,0 +1,53 @@
+import string
+from itertools import takewhile
+from collections import OrderedDict
+import re
+
+def sanitize_string(s):
+    return "".join(_ for _ in takewhile(lambda a: a in string.printable, s))
+
+def load_info_records(info_records):
+    info_records_parsed = [sanitize_string(line_chars) for line_chars in info_records]
+    return group_by_sections(info_records_parsed)
+
+def group_by_sections(info_records):
+    # 1. Split by top groupings
+    top_levels = get_top_levels(info_records)
+    # 2. Determine if in section by index number
+    grouped = OrderedDict()
+    for tidx, top_level in enumerate(top_levels):
+        grouped[top_level[1]] = []
+
+        try:
+            next_idx = top_levels[tidx + 1][0]
+        except IndexError:
+            next_idx = len(info_records) - 1
+
+        for idx in range(top_level[0], next_idx):       
+            if idx == top_level[0]:
+                continue
+
+            grouped[top_level[1]].append(info_records[idx])
+
+    
+    if 'Version Info' in grouped.keys():
+        version_info = OrderedDict()
+        for line in grouped['Version Info']:
+            split_line = line.split(":")
+            key = split_line[0]
+            val = ":".join(split_line[1:]).lstrip().rstrip()
+            if key != '':
+                version_info[key] = val
+        grouped['Version Info'] = version_info
+    
+    return grouped
+
+def get_top_levels(info_records):
+    top_levels = []
+    for idx, line in enumerate(info_records):
+        pattern = re.compile(r"###[a-zA-Z\s]+")
+        if pattern.match(line):
+            clean_line = re.sub(r'[^\w\s]', '', line).lstrip().rstrip()
+            top_levels.append([idx, clean_line])
+    
+    return top_levels

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/gadget/simulation_handling.py
--- a/yt/frontends/gadget/simulation_handling.py
+++ b/yt/frontends/gadget/simulation_handling.py
@@ -375,7 +375,7 @@
         else:
             if self.parameters["OutputListOn"]:
                 a_values = [float(a) for a in 
-                           file(self.parameters["OutputListFilename"], "r").readlines()]
+                            open(self.parameters["OutputListFilename"], "r").readlines()]
             else:
                 a_values = [float(self.parameters["TimeOfFirstSnapshot"])]
                 time_max = float(self.parameters["TimeMax"])

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/owls_subfind/io.py
--- a/yt/frontends/owls_subfind/io.py
+++ b/yt/frontends/owls_subfind/io.py
@@ -42,7 +42,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files, key=lambda f: f.filename):
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     pcount = data_file.total_particles[ptype]
@@ -76,7 +76,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files, key=lambda f: f.filename):
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     pcount = data_file.total_particles[ptype]

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -11,6 +11,7 @@
     config.add_subpackage("athena")
     config.add_subpackage("boxlib")
     config.add_subpackage("chombo")
+    config.add_subpackage("exodus_ii")
     config.add_subpackage("eagle")
     config.add_subpackage("enzo")
     config.add_subpackage("fits")
@@ -37,6 +38,7 @@
     config.add_subpackage("chombo/tests")
     config.add_subpackage("eagle/tests")
     config.add_subpackage("enzo/tests")
+    config.add_subpackage("exodus_ii/tests")
     config.add_subpackage("fits/tests")
     config.add_subpackage("flash/tests")
     config.add_subpackage("gadget/tests")
@@ -47,6 +49,7 @@
     config.add_subpackage("ramses/tests")
     config.add_subpackage("rockstar/tests")
     config.add_subpackage("stream/tests")
+    config.add_subpackage("stream/sample_data")
     config.add_subpackage("tipsy/tests")
     config.add_subpackage("ytdata/tests")
     return config

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -34,3 +34,5 @@
       IOHandlerStream
 
 from . import tests
+
+from . import sample_data

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -139,7 +139,7 @@
         self.io = io
         self.particle_types = particle_types
         self.periodicity = periodicity
-            
+
     def get_fields(self):
         return self.fields.all_fields
 
@@ -149,7 +149,7 @@
             return self.particle_types[field]
         else :
             return False
-        
+
 class StreamHierarchy(GridIndex):
 
     grid = StreamGrid
@@ -293,7 +293,7 @@
         #self._parameter_override = parameter_override
         #if conversion_override is None: conversion_override = {}
         #self._conversion_override = conversion_override
-
+        self.fluid_types += ("stream",)
         self.geometry = geometry
         self.stream_handler = stream_handler
         name = "InMemoryParameterFile_%s" % (uuid.uuid4().hex)
@@ -375,7 +375,7 @@
         if len(s) == 1:
             field = ("io", k)
         elif len(s) == 3:
-            field = ("gas", k)
+            field = ("stream", k)
         elif len(s) == 0:
             continue
         else:
@@ -501,7 +501,7 @@
         elif len(data[field].shape) in (1, 2):
             new_field = ("io", field)
         elif len(data[field].shape) == 3:
-            new_field = ("gas", field)
+            new_field = ("stream", field)
         else:
             raise RuntimeError
         new_data[new_field] = data[field]
@@ -1517,9 +1517,9 @@
 
     field_units, data = unitify_data(data)
     sfh = StreamDictFieldHandler()
-    
+
     particle_types = set_particle_types(data)
-    
+
     sfh.update({0:data})
     grid_left_edges = domain_left_edge
     grid_right_edges = domain_right_edge
@@ -1603,12 +1603,13 @@
     _field_info_class = StreamFieldInfo
     _dataset_type = "stream_unstructured"
 
-def load_unstructured_mesh(data, connectivity, coordinates,
-                         length_unit = None, bbox=None, sim_time=0.0,
-                         mass_unit = None, time_unit = None,
-                         velocity_unit = None, magnetic_unit = None,
-                         periodicity=(False, False, False),
-                         geometry = "cartesian"):
+
+def load_unstructured_mesh(connectivity, coordinates, node_data=None,
+                           elem_data=None, length_unit=None, bbox=None,
+                           sim_time=0.0, mass_unit=None, time_unit=None,
+                           velocity_unit=None, magnetic_unit=None,
+                           periodicity=(False, False, False),
+                           geometry = "cartesian"):
     r"""Load an unstructured mesh of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -1622,10 +1623,6 @@
 
     Parameters
     ----------
-    data : dict or list of dicts
-        This is a list of dicts of numpy arrays, where each element in the list
-        is a different mesh, and where the keys of dicts are the field names.
-        If a dict is supplied, this will be assumed to be the only mesh.
     connectivity : list of array_like or array_like
         This is the connectivity array for the meshes; this should either be a
         list where each element in the list is a numpy array or a single numpy
@@ -1635,6 +1632,18 @@
     coordinates : array_like
         This should be of size (L,3) where L is the number of vertices
         indicated in the connectivity matrix.
+    node_data : dict or list of dicts
+        This is a list of dicts of numpy arrays, where each element in the list
+        is a different mesh, and where the keys of dicts are the field names.
+        If a dict is supplied, this will be assumed to be the only mesh. These
+        data fields are assumed to be node-centered, i.e. there must be one
+        value for every node in the mesh.
+    elem_data : dict or list of dicts
+        This is a list of dicts of numpy arrays, where each element in the list
+        is a different mesh, and where the keys of dicts are the field names.
+        If a dict is supplied, this will be assumed to be the only mesh. These
+        data fields are assumed to be element-centered, i.e. there must be only
+        one value for every element in the mesh.
     bbox : array_like (xdim:zdim, LE:RE), optional
         Size of computational domain in units of the length unit.
     sim_time : float, optional
@@ -1662,8 +1671,33 @@
 
     domain_dimensions = np.ones(3, "int32") * 2
     nprocs = 1
+
+    if elem_data is None and node_data is None:
+        raise RuntimeError("No data supplied in load_unstructured_mesh.")
+
+    if isinstance(connectivity, list):
+        num_meshes = len(connectivity)
+    else:
+        num_meshes = 1
+    connectivity = ensure_list(connectivity)
+
+    if elem_data is None:
+        elem_data = [{} for i in range(num_meshes)]
+    elem_data = ensure_list(elem_data)
+
+    if node_data is None:
+        node_data = [{} for i in range(num_meshes)]
+    node_data = ensure_list(node_data)
+
+    data = [{} for i in range(num_meshes)]
+    for elem_dict, data_dict in zip(elem_data, data):
+        for field, values in elem_dict.items():
+            data_dict[field] = values
+    for node_dict, data_dict in zip(node_data, data):
+        for field, values in node_dict.items():
+            data_dict[field] = values
     data = ensure_list(data)
-    connectivity = ensure_list(connectivity)
+
     if bbox is None:
         bbox = np.array([[coordinates[:,i].min() - 0.1 * abs(coordinates[:,i].min()),
                           coordinates[:,i].max() + 0.1 * abs(coordinates[:,i].max())]
@@ -1733,5 +1767,12 @@
 
     sds = StreamUnstructuredMeshDataset(handler, geometry = geometry)
 
+    fluid_types = ()
+    for i in range(1, num_meshes + 1):
+        fluid_types += ('connect%d' % i,)
+    sds.fluid_types = fluid_types
+
+    sds._node_fields = node_data[0].keys()
+    sds._elem_fields = elem_data[0].keys()
+
     return sds
-
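
With the new signature, mesh fields are passed through node_data and
elem_data rather than a single data argument. A minimal sketch of a call
under the new API, assuming the loader is exposed at the top level as
yt.load_unstructured_mesh and using a single tetrahedral mesh (field
names and values are illustrative, not from the changeset):

    import numpy as np
    import yt

    # One tetrahedron: four vertices, one element.
    coordinates = np.array([[0.0, 0.0, 0.0],
                            [1.0, 0.0, 0.0],
                            [0.0, 1.0, 0.0],
                            [0.0, 0.0, 1.0]])
    connectivity = np.array([[0, 1, 2, 3]])

    # One value per node for node-centered fields, one value per element
    # for element-centered fields, per the docstring above.
    node_data = {"convected": np.array([[1.0, 2.0, 3.0, 4.0]])}
    elem_data = {"elem_val": np.array([5.0])}

    ds = yt.load_unstructured_mesh(connectivity, coordinates,
                                   node_data=node_data,
                                   elem_data=elem_data)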

diff -r 8749b4851a293d389b9e051cf901bac35fa2b91e -r a3ac4125985758f42f50bd7f183f8445a394ca8c yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -19,54 +19,54 @@
 
 class StreamFieldInfo(FieldInfoContainer):
     known_other_fields = (
-        ("density", ("code_mass/code_length**3", [], None)),
-        ("dark_matter_density", ("code_mass/code_length**3", [], None)),
-        ("number_density", ("1/code_length**3", [], None)),
-        ("pressure", ("dyne/code_length**2", [], None)),
-        ("thermal_energy", ("erg / g", [], None)),
-        ("temperature", ("K", [], None)),
-        ("velocity_x", ("code_length/code_time", [], None)),
-        ("velocity_y", ("code_length/code_time", [], None)),
-        ("velocity_z", ("code_length/code_time", [], None)),
-        ("magnetic_field_x", ("gauss", [], None)),
-        ("magnetic_field_y", ("gauss", [], None)),
-        ("magnetic_field_z", ("gauss", [], None)),
-        ("radiation_acceleration_x", ("code_length/code_time**2", [], None)),
-        ("radiation_acceleration_y", ("code_length/code_time**2", [], None)),
-        ("radiation_acceleration_z", ("code_length/code_time**2", [], None)),
+        ("density", ("code_mass/code_length**3", ["density"], None)),
+        ("dark_matter_density", ("code_mass/code_length**3", ["dark_matter_density"], None)),
+        ("number_density", ("1/code_length**3", ["number_density"], None)),
+        ("pressure", ("dyne/code_length**2", ["pressure"], None)),
+        ("thermal_energy", ("erg / g", ["thermal_energy"], None)),
+        ("temperature", ("K", ["temperature"], None)),
+        ("velocity_x", ("code_length/code_time", ["velocity_x"], None)),
+        ("velocity_y", ("code_length/code_time", ["velocity_y"], None)),
+        ("velocity_z", ("code_length/code_time", ["velocity_z"], None)),
+        ("magnetic_field_x", ("gauss", ["magnetic_field_x"], None)),
+        ("magnetic_field_y", ("gauss", ["magnetic_field_y"], None)),
+        ("magnetic_field_z", ("gauss", ["magnetic_field_z"], None)),
+        ("radiation_acceleration_x", ("code_length/code_time**2", ["radiation_acceleration_x"], None)),
+        ("radiation_acceleration_y", ("code_length/code_time**2", ["radiation_acceleration_y"], None)),
+        ("radiation_acceleration_z", ("code_length/code_time**2", ["radiation_acceleration_z"], None)),
 
         # We need to have a bunch of species fields here, too
-        ("metal_density",   ("code_mass/code_length**3", [], None)),
-        ("hi_density",      ("code_mass/code_length**3", [], None)),
-        ("hii_density",     ("code_mass/code_length**3", [], None)),
-        ("h2i_density",     ("code_mass/code_length**3", [], None)),
-        ("h2ii_density",    ("code_mass/code_length**3", [], None)),
-        ("h2m_density",     ("code_mass/code_length**3", [], None)),
-        ("hei_density",     ("code_mass/code_length**3", [], None)),
-        ("heii_density",    ("code_mass/code_length**3", [], None)),
-        ("heiii_density",   ("code_mass/code_length**3", [], None)),
-        ("hdi_density",     ("code_mass/code_length**3", [], None)),
-        ("di_density",      ("code_mass/code_length**3", [], None)),
-        ("dii_density",     ("code_mass/code_length**3", [], None)),
+        ("metal_density",   ("code_mass/code_length**3", ["metal_density"], None)),
+        ("hi_density",      ("code_mass/code_length**3", ["hi_density"], None)),
+        ("hii_density",     ("code_mass/code_length**3", ["hii_density"], None)),
+        ("h2i_density",     ("code_mass/code_length**3", ["h2i_density"], None)),
+        ("h2ii_density",    ("code_mass/code_length**3", ["h2ii_density"], None)),
+        ("h2m_density",     ("code_mass/code_length**3", ["h2m_density"], None)),
+        ("hei_density",     ("code_mass/code_length**3", ["hei_density"], None)),
+        ("heii_density",    ("code_mass/code_length**3", ["heii_density"], None)),
+        ("heiii_density",   ("code_mass/code_length**3", ["heiii_density"], None)),
+        ("hdi_density",     ("code_mass/code_length**3", ["hdi_density"], None)),
+        ("di_density",      ("code_mass/code_length**3", ["di_density"], None)),
+        ("dii_density",     ("code_mass/code_length**3", ["dii_density"], None)),
     )
 
     known_particle_fields = (
-        ("particle_position", ("code_length", [], None)),
-        ("particle_position_x", ("code_length", [], None)),
-        ("particle_position_y", ("code_length", [], None)),
-        ("particle_position_z", ("code_length", [], None)),
-        ("particle_velocity", ("code_length/code_time", [], None)),
-        ("particle_velocity_x", ("code_length/code_time", [], None)),
-        ("particle_velocity_y", ("code_length/code_time", [], None)),
-        ("particle_velocity_z", ("code_length/code_time", [], None)),
-        ("particle_index", ("", [], None)),
-        ("particle_gas_density", ("code_mass/code_length**3", [], None)),
-        ("particle_gas_temperature", ("K", [], None)),
-        ("particle_mass", ("code_mass", [], None)),
-        ("smoothing_length", ("code_length", [], None)),
-        ("density", ("code_mass/code_length**3", [], None)),
-        ("temperature", ("code_temperature", [], None)),
-        ("creation_time", ("code_time", [], None)),
+        ("particle_position", ("code_length", ["particle_position"], None)),
+        ("particle_position_x", ("code_length", ["particle_position_x"], None)),
+        ("particle_position_y", ("code_length", ["particle_position_y"], None)),
+        ("particle_position_z", ("code_length", ["particle_position_z"], None)),
+        ("particle_velocity", ("code_length/code_time", ["particle_velocity"], None)),
+        ("particle_velocity_x", ("code_length/code_time", ["particle_velocity_x"], None)),
+        ("particle_velocity_y", ("code_length/code_time", ["particle_velocity_y"], None)),
+        ("particle_velocity_z", ("code_length/code_time", ["particle_velocity_z"], None)),
+        ("particle_index", ("", ["particle_index"], None)),
+        ("particle_gas_density", ("code_mass/code_length**3", ["particle_gas_density"], None)),
+        ("particle_gas_temperature", ("K", ["particle_gas_temperature"], None)),
+        ("particle_mass", ("code_mass", ["particle_mass"], None)),
+        ("smoothing_length", ("code_length", ["smoothing_length"], None)),
+        ("density", ("code_mass/code_length**3", ["density"], None)),
+        ("temperature", ("code_temperature", ["temperature"], None)),
+        ("creation_time", ("code_time", ["creation_time"], None)),
     )
 
     def setup_fluid_fields(self):

This diff is so big that we needed to truncate the remainder.
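
The second element of each tuple in yt/frontends/stream/fields.py is the
alias list: filling it in means the frontend field picks up a "gas"-type
alias in addition to its native name. Combined with the new "stream"
fluid type registered in data_structures.py, both spellings expose the
same data. A sketch of the intended behavior (the dataset here is
illustrative):

    import numpy as np
    import yt

    arr = np.random.random((16, 16, 16))
    ds = yt.load_uniform_grid({"density": arr}, arr.shape)
    ad = ds.all_data()

    raw = ad["stream", "density"]  # the native stream field
    gas = ad["gas", "density"]     # the alias declared in the list above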

https://bitbucket.org/yt_analysis/yt/commits/aee9d370c020/
Changeset:   aee9d370c020
Branch:      yt
User:        xarthisius
Date:        2015-12-16 16:02:32+00:00
Summary:     Use a context manager to close the file that captures stderr
Affected #:  1 file

diff -r a3ac4125985758f42f50bd7f183f8445a394ca8c -r aee9d370c020eacd05cf65df3f57478a95b42845 tests/nose_runner.py
--- a/tests/nose_runner.py
+++ b/tests/nose_runner.py
@@ -4,19 +4,22 @@
 import multiprocessing as mp
 import nose
 import glob
+from contextlib import closing
 from yt.config import ytcfg
 from yt.utilities.answer_testing.framework import AnswerTesting
 
+
 def run_job(argv):
-    cur_stderr = sys.stderr
-    sys.stderr = open(str(os.getpid()) + ".out", "w")
-    answer = argv[0]
-    test_dir = ytcfg.get("yt", "test_data_dir")
-    answers_dir = os.path.join(test_dir, "answers")
-    if not os.path.isdir(os.path.join(answers_dir, answer)):
-        nose.run(argv=argv + ['--answer-store'],
-                 addplugins=[AnswerTesting()], exit=False)
-    nose.run(argv=argv, addplugins=[AnswerTesting()], exit=False)
+    with closing(open(str(os.getpid()) + ".out", "w")) as fstderr:
+        cur_stderr = sys.stderr
+        sys.stderr = fstderr
+        answer = argv[0]
+        test_dir = ytcfg.get("yt", "test_data_dir")
+        answers_dir = os.path.join(test_dir, "answers")
+        if not os.path.isdir(os.path.join(answers_dir, answer)):
+            nose.run(argv=argv + ['--answer-store'],
+                     addplugins=[AnswerTesting()], exit=False)
+        nose.run(argv=argv, addplugins=[AnswerTesting()], exit=False)
     sys.stderr = cur_stderr
 
 if __name__ == "__main__":
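
A minimal sketch of the redirection pattern introduced here: everything
written to stderr inside the with-block lands in a per-process .out file,
and the file is closed on exit from the block even if a test run raises.
(Strictly, closing() is belt-and-braces: file objects are already context
managers, so "with open(...) as fstderr:" alone would behave the same.)

    import os
    import sys
    from contextlib import closing

    with closing(open(str(os.getpid()) + ".out", "w")) as fstderr:
        cur_stderr = sys.stderr   # remember the real stderr
        sys.stderr = fstderr      # capture everything in the .out file
        sys.stderr.write("captured\n")
    sys.stderr = cur_stderr       # restore once the file is closed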


https://bitbucket.org/yt_analysis/yt/commits/11eeec1a027c/
Changeset:   11eeec1a027c
Branch:      yt
User:        xarthisius
Date:        2015-12-16 17:56:30+00:00
Summary:     Change seed as it was causing issues for py3
Affected #:  1 file

diff -r aee9d370c020eacd05cf65df3f57478a95b42845 -r 11eeec1a027c32b4d753ea12f6df5b877a0d2f00 yt/visualization/tests/test_mesh_slices.py
--- a/yt/visualization/tests/test_mesh_slices.py
+++ b/yt/visualization/tests/test_mesh_slices.py
@@ -32,7 +32,7 @@
     curdir = os.getcwd()
     os.chdir(tmpdir)
 
-    np.random.seed(0451)
+    np.random.seed(0x4d3d3d3)
 
     # tetrahedral ds
     ds = fake_tetrahedral_ds()
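
The old seed is a Python 2 octal literal: 0451 parses as decimal 297
there, but Python 3 dropped leading-zero octals altogether, so the module
failed to even compile under py3 (the modern spelling would be 0o451).
The replacement is a plain hex literal, valid on both:

    import numpy as np

    # 0451      -> SyntaxError on Python 3 (octal must be written 0o451)
    # 0x4d3d3d3 -> ordinary hex integer literal, accepted everywhere
    np.random.seed(0x4d3d3d3)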


https://bitbucket.org/yt_analysis/yt/commits/fdac582125a3/
Changeset:   fdac582125a3
Branch:      yt
User:        xarthisius
Date:        2015-12-16 19:18:14+00:00
Summary:     Run flake8 without changing dirs
Affected #:  1 file

diff -r 11eeec1a027c32b4d753ea12f6df5b877a0d2f00 -r fdac582125a3e6c1236372412b7f3e01e853a68b yt/tests/test_flake8.py
--- a/yt/tests/test_flake8.py
+++ b/yt/tests/test_flake8.py
@@ -8,14 +8,14 @@
 @requires_module('flake8')
 def test_flake8():
     yt_dir = os.path.dirname(os.path.abspath(yt.__file__))
-    initial_dir = os.getcwd()
-    os.chdir(yt_dir)
-    output_file = os.path.sep.join([os.path.dirname(initial_dir), 'flake8.out'])
+    output_file = os.environ.get("WORKSPACE", None) or os.getcwd()
+    output_file = os.path.join(output_file, 'flake8.out')
     if os.path.exists(output_file):
         os.remove(output_file)
     output_string = "--output-file=%s" % output_file
-    subprocess.call(['flake8', output_string, os.curdir])
-    os.chdir(initial_dir)
+    config_string = "--config=%s" % os.path.join(yt_dir, 'setup.cfg')
+    subprocess.call(['flake8', output_string, config_string, yt_dir])
+    
     with open(output_file) as f:
         flake8_output = f.readlines()
     if flake8_output != []:


https://bitbucket.org/yt_analysis/yt/commits/117e64c7a65b/
Changeset:   117e64c7a65b
Branch:      yt
User:        xarthisius
Date:        2015-12-16 19:18:55+00:00
Summary:     merging
Affected #:  1 file

diff -r fdac582125a3e6c1236372412b7f3e01e853a68b -r 117e64c7a65b8cb823f9e1d790e2c8f47c7ff76a yt/frontends/gadget/tests/test_outputs.py
--- a/yt/frontends/gadget/tests/test_outputs.py
+++ b/yt/frontends/gadget/tests/test_outputs.py
@@ -25,7 +25,7 @@
 
 isothermal_h5 = "IsothermalCollapse/snap_505.hdf5"
 isothermal_bin = "IsothermalCollapse/snap_505"
-gdg = "GadgetDiskGalaxy/snapshot_200.hdf5"
+g64 = "gizmo_64/output/snap_N64L16_135.hdf5"
 
 # This maps from field names to weight field names to use for projections
 iso_fields = OrderedDict(
@@ -42,9 +42,9 @@
 )
 iso_kwargs = dict(bounding_box=[[-3, 3], [-3, 3], [-3, 3]])
 
-gdg_fields = iso_fields.copy()
-gdg_fields["deposit", "PartType4_density"] = None
-gdg_kwargs = dict(bounding_box=[[-1e5, 1e5], [-1e5, 1e5], [-1e5, 1e5]])
+g64_fields = iso_fields.copy()
+g64_fields["deposit", "PartType4_density"] = None
+g64_kwargs = dict(bounding_box=[[-1e5, 1e5], [-1e5, 1e5], [-1e5, 1e5]])
 
 
 @requires_file(isothermal_h5)
@@ -63,9 +63,9 @@
         test_iso_collapse.__name__ = test.description
         yield test
 
-@requires_ds(gdg, big_data=True)
-def test_gadget_disk_galaxy():
-    for test in sph_answer(gdg, 'snapshot_200', 11907080, gdg_fields,
-                           ds_kwargs=gdg_kwargs):
-        test_gadget_disk_galaxy.__name__ = test.description
+@requires_ds(g64, big_data=True)
+def test_gizmo_64():
+    for test in sph_answer(g64, 'snap_N64L16_135', 524288, g64_fields,
+                           ds_kwargs=g64_kwargs):
+        test_gizmo_64.__name__ = test.description
         yield test


https://bitbucket.org/yt_analysis/yt/commits/9addedac7192/
Changeset:   9addedac7192
Branch:      yt
User:        xarthisius
Date:        2015-12-16 19:26:43+00:00
Summary:     setup.cfg is in yt_dir/..
Affected #:  1 file

diff -r 117e64c7a65b8cb823f9e1d790e2c8f47c7ff76a -r 9addedac719230963eb442a92805e7c85d0f0226 yt/tests/test_flake8.py
--- a/yt/tests/test_flake8.py
+++ b/yt/tests/test_flake8.py
@@ -13,7 +13,8 @@
     if os.path.exists(output_file):
         os.remove(output_file)
     output_string = "--output-file=%s" % output_file
-    config_string = "--config=%s" % os.path.join(yt_dir, 'setup.cfg')
+    config_string = "--config=%s" % os.path.join(os.path.dirname(yt_dir), 
+                                                 'setup.cfg')
     subprocess.call(['flake8', output_string, config_string, yt_dir])
     
     with open(output_file) as f:
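
Together with the previous changeset, the test now amounts to the
following standalone invocation, runnable from any working directory
(a sketch; WORKSPACE is the workspace path exported by Jenkins on CI):

    import os
    import subprocess
    import yt

    yt_dir = os.path.dirname(os.path.abspath(yt.__file__))
    out_dir = os.environ.get("WORKSPACE", None) or os.getcwd()
    subprocess.call([
        'flake8',
        '--output-file=%s' % os.path.join(out_dir, 'flake8.out'),
        # setup.cfg lives one level above the installed yt package
        '--config=%s' % os.path.join(os.path.dirname(yt_dir), 'setup.cfg'),
        yt_dir,
    ])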


https://bitbucket.org/yt_analysis/yt/commits/45097cd63280/
Changeset:   45097cd63280
Branch:      yt
User:        xarthisius
Date:        2015-12-16 20:53:14+00:00
Summary:     merging
Affected #:  1 file

diff -r 9addedac719230963eb442a92805e7c85d0f0226 -r 45097cd632806cccbbec37bc44db2f825b89cb70 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -47,6 +47,12 @@
                "y":("z","x"),
                "z":("x","y")}
 
+def force_unicode(value):
+    if hasattr(value, 'decode'):
+        return value.decode('utf8')
+    else:
+        return value
+
 def parse_value(value, default_units):
     if isinstance(value, YTQuantity):
         return value.in_units(default_units)
@@ -919,27 +925,28 @@
 
         p = f["/parameters"]
         parameters["ExposureTime"] = YTQuantity(p["exp_time"].value, "s")
-        if isinstance(p["area"].value, (string_types, bytes)):
-            parameters["Area"] = p["area"].value.decode("utf8")
+        area = force_unicode(p['area'].value)
+        if isinstance(area, string_types):
+            parameters["Area"] = area
         else:
-            parameters["Area"] = YTQuantity(p["area"].value, "cm**2")
+            parameters["Area"] = YTQuantity(area, "cm**2")
         parameters["Redshift"] = p["redshift"].value
         parameters["AngularDiameterDistance"] = YTQuantity(p["d_a"].value, "Mpc")
         parameters["sky_center"] = YTArray(p["sky_center"][:], "deg")
         parameters["dtheta"] = YTQuantity(p["dtheta"].value, "deg")
         parameters["pix_center"] = p["pix_center"][:]
         if "rmf" in p:
-            parameters["RMF"] = p["rmf"].value.decode("utf8")
+            parameters["RMF"] = force_unicode(p["rmf"].value)
         if "arf" in p:
-            parameters["ARF"] = p["arf"].value.decode("utf8")
+            parameters["ARF"] = force_unicode(p["arf"].value)
         if "channel_type" in p:
-            parameters["ChannelType"] = p["channel_type"].value.decode("utf8")
+            parameters["ChannelType"] = force_unicode(p["channel_type"].value)
         if "mission" in p:
-            parameters["Mission"] = p["mission"].value.decode("utf8")
+            parameters["Mission"] = force_unicode(p["mission"].value)
         if "telescope" in p:
-            parameters["Telescope"] = p["telescope"].value.decode("utf8")
+            parameters["Telescope"] = force_unicode(p["telescope"].value)
         if "instrument" in p:
-            parameters["Instrument"] = p["instrument"].value.decode("utf8")
+            parameters["Instrument"] = force_unicode(p["instrument"].value)
 
         d = f["/data"]
         events["xpix"] = d["xpix"][:]
@@ -1552,4 +1559,4 @@
             d.create_dataset(key, data=f_in[key].value)
 
     f_in.close()
-    f_out.close()
\ No newline at end of file
+    f_out.close()
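
force_unicode addresses a py2/py3 split in how h5py hands back string
data: under Python 3, fixed-length HDF5 strings arrive as bytes and need
decoding, while other values should pass through untouched. Restating the
helper from the diff with illustrative inputs:

    def force_unicode(value):
        # bytes (and py2 str) have .decode; everything else is returned
        # unchanged.
        if hasattr(value, 'decode'):
            return value.decode('utf8')
        return value

    assert force_unicode(b'ACIS-S') == 'ACIS-S'  # bytes -> text
    assert force_unicode(1.2) == 1.2             # non-strings untouched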


https://bitbucket.org/yt_analysis/yt/commits/ccfba2dc8567/
Changeset:   ccfba2dc8567
Branch:      yt
User:        xarthisius
Date:        2015-12-17 01:55:45+00:00
Summary:     merging
Affected #:  3 files

diff -r 45097cd632806cccbbec37bc44db2f825b89cb70 -r ccfba2dc8567bd11fdcaeac4f19594c4e915432b yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -158,7 +158,7 @@
             return []
 
     def _get_unique_identifier(self):
-        return self.parameter_filename.__hash__()
+        return self.parameter_filename
 
     def _get_current_time(self):
         try:

diff -r 45097cd632806cccbbec37bc44db2f825b89cb70 -r ccfba2dc8567bd11fdcaeac4f19594c4e915432b yt/frontends/exodus_ii/tests/test_outputs.py
--- a/yt/frontends/exodus_ii/tests/test_outputs.py
+++ b/yt/frontends/exodus_ii/tests/test_outputs.py
@@ -28,7 +28,6 @@
     ds = data_dir_load(out)
     yield assert_equal, str(ds), "out.e"
     yield assert_equal, ds.dimensionality, 3
-    yield assert_equal, ds.unique_identifier, 5081193338833632556
     yield assert_equal, ds.current_time, 0.0
     yield assert_array_equal, ds.parameters['nod_names'], ['convected', 'diffused']
     yield assert_equal, ds.parameters['num_meshes'], 2
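
String hashes are salted per interpreter process on Python 3 (hash
randomization), so a hash-based unique_identifier, and with it the
hard-coded value asserted in the test, could never be reproduced across
runs; the filename itself is stable. A quick demonstration of the
underlying problem (Python 3):

    import subprocess
    import sys

    cmd = [sys.executable, '-c', "print(hash('out.e'))"]
    # Two separate interpreter processes print different hashes (with
    # overwhelming probability) because each gets its own random salt.
    print(subprocess.check_output(cmd))
    print(subprocess.check_output(cmd))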

diff -r 45097cd632806cccbbec37bc44db2f825b89cb70 -r ccfba2dc8567bd11fdcaeac4f19594c4e915432b yt/frontends/exodus_ii/util.py
--- a/yt/frontends/exodus_ii/util.py
+++ b/yt/frontends/exodus_ii/util.py
@@ -1,10 +1,12 @@
-import string
+import sys
 from itertools import takewhile
 from collections import OrderedDict
 import re
 
 def sanitize_string(s):
-    return "".join(_ for _ in takewhile(lambda a: a in string.printable, s))
+    if sys.version_info > (3, ):
+        return "".join([chr(_) for _ in takewhile(lambda a: a, s)])
+    return "".join([_ for _ in takewhile(lambda a: a, s)])
 
 def load_info_records(info_records):
     info_records_parsed = [sanitize_string(line_chars) for line_chars in info_records]
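
The rewrite is needed because iterating a bytes object yields ints on
Python 3, so the characters must be rebuilt with chr(); the truthiness
test in takewhile stops at the first NUL, trimming the padding in the
fixed-width records being parsed. For example (Python 3, illustrative
byte string):

    import sys
    from itertools import takewhile

    def sanitize_string(s):
        if sys.version_info > (3,):
            return "".join([chr(_) for _ in takewhile(lambda a: a, s)])
        return "".join([_ for _ in takewhile(lambda a: a, s)])

    print(sanitize_string(b'out.e\x00\x00'))  # -> out.e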


https://bitbucket.org/yt_analysis/yt/commits/97d5e68dd39a/
Changeset:   97d5e68dd39a
Branch:      yt
User:        ngoldbaum
Date:        2015-12-17 21:20:24+00:00
Summary:     Merged in xarthisius/yt (pull request #1893)

[WIP] automatic answer testing definition
Affected #:  6 files

diff -r 47ce79f5d017d9a9352f30c0d1382b7297bbfd5c -r 97d5e68dd39a2a008c8c621f94d253bfa8fb0b5d tests/nose_runner.py
--- /dev/null
+++ b/tests/nose_runner.py
@@ -0,0 +1,54 @@
+import sys
+import os
+import yaml
+import multiprocessing as mp
+import nose
+import glob
+from contextlib import closing
+from yt.config import ytcfg
+from yt.utilities.answer_testing.framework import AnswerTesting
+
+
+def run_job(argv):
+    with closing(open(str(os.getpid()) + ".out", "w")) as fstderr:
+        cur_stderr = sys.stderr
+        sys.stderr = fstderr
+        answer = argv[0]
+        test_dir = ytcfg.get("yt", "test_data_dir")
+        answers_dir = os.path.join(test_dir, "answers")
+        if not os.path.isdir(os.path.join(answers_dir, answer)):
+            nose.run(argv=argv + ['--answer-store'],
+                     addplugins=[AnswerTesting()], exit=False)
+        nose.run(argv=argv, addplugins=[AnswerTesting()], exit=False)
+    sys.stderr = cur_stderr
+
+if __name__ == "__main__":
+    test_dir = ytcfg.get("yt", "test_data_dir")
+    answers_dir = os.path.join(test_dir, "answers")
+    with open('tests/tests_%i.%i.yaml' % sys.version_info[:2], 'r') as obj:
+        tests = yaml.load(obj)
+
+    base_argv = ['--local-dir=%s' % answers_dir, '-v', '-s', '--nologcapture',
+                 '--with-answer-testing', '--answer-big-data', '--local']
+    args = [['unittests', '-v', '-s', '--nologcapture']]
+    for answer in list(tests.keys()):
+        argv = [answer]
+        argv += base_argv
+        argv.append('--xunit-file=%s.xml' % answer)
+        argv.append('--answer-name=%s' % answer)
+        argv += tests[answer]
+        args.append(argv)
+    
+    processes = [mp.Process(target=run_job, args=(args[i],))
+                 for i in range(len(args))]
+    for p in processes:
+        p.start()
+    for p in processes:
+        p.join(timeout=7200)
+        if p.is_alive():
+            p.terminate()
+            p.join(timeout=30)
+    for fname in glob.glob("*.out"):
+        with open(fname, 'r') as fin:
+            print(fin.read())
+        os.remove(fname)
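
Design note on the runner: spawning an explicit Process per answer suite,
rather than handing the whole list to a worker pool, is what makes the
per-job timeout possible; join(timeout=7200) caps each suite at two
hours, and a wedged suite is terminated instead of stalling the entire
run. The shutdown sequence in miniature (run_suite is a stand-in):

    import multiprocessing as mp

    def run_suite(argv):
        pass  # stand-in for run_job above

    if __name__ == "__main__":
        jobs = [mp.Process(target=run_suite, args=(argv,))
                for argv in (['suite_a'], ['suite_b'])]
        for p in jobs:
            p.start()
        for p in jobs:
            p.join(timeout=7200)    # wait up to two hours per suite
            if p.is_alive():
                p.terminate()       # kill a straggler ...
                p.join(timeout=30)  # ... and reap it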

diff -r 47ce79f5d017d9a9352f30c0d1382b7297bbfd5c -r 97d5e68dd39a2a008c8c621f94d253bfa8fb0b5d tests/tests_2.7.yaml
--- /dev/null
+++ b/tests/tests_2.7.yaml
@@ -0,0 +1,51 @@
+local_artio_270:
+  - yt/frontends/artio/tests/test_outputs.py
+
+local_athena_270:
+  - yt/frontends/athena
+
+local_chombo_270:
+  - yt/frontends/chombo/tests/test_outputs.py
+
+local_enzo_270:
+  - yt/frontends/enzo
+
+local_fits_270:
+  - yt/frontends/fits/tests/test_outputs.py
+
+local_flash_270:
+  - yt/frontends/flash/tests/test_outputs.py
+
+local_gadget_270:
+  - yt/frontends/gadget/tests/test_outputs.py
+
+local_halos_270:
+  - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
+  - yt/analysis_modules/halo_finding/tests/test_rockstar.py
+  - yt/frontends/owls_subfind/tests/test_outputs.py
+
+local_owls_270:
+  - yt/frontends/owls/tests/test_outputs.py
+
+local_pw_270:
+  - yt/visualization/tests/test_plotwindow.py:test_attributes
+  - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+
+local_tipsy_270:
+  - yt/frontends/tipsy/tests/test_outputs.py
+
+local_varia_270:
+  - yt/analysis_modules/radmc3d_export
+  - yt/frontends/moab/tests/test_c5.py
+  - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+  - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+  - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+
+local_orion_270:
+  - yt/frontends/boxlib/tests/test_orion.py
+
+local_ramses_270:
+  - yt/frontends/ramses/tests/test_outputs.py
+
+local_ytdata_270:
+  - yt/frontends/ytdata
\ No newline at end of file

diff -r 47ce79f5d017d9a9352f30c0d1382b7297bbfd5c -r 97d5e68dd39a2a008c8c621f94d253bfa8fb0b5d tests/tests_3.4.yaml
--- /dev/null
+++ b/tests/tests_3.4.yaml
@@ -0,0 +1,49 @@
+local_artio_340:
+  - yt/frontends/artio/tests/test_outputs.py
+
+local_athena_340:
+  - yt/frontends/athena
+
+local_chombo_340:
+  - yt/frontends/chombo/tests/test_outputs.py
+
+local_enzo_340:
+  - yt/frontends/enzo
+
+local_fits_340:
+  - yt/frontends/fits/tests/test_outputs.py
+
+local_flash_340:
+  - yt/frontends/flash/tests/test_outputs.py
+
+local_gadget_340:
+  - yt/frontends/gadget/tests/test_outputs.py
+
+local_halos_340:
+  - yt/frontends/owls_subfind/tests/test_outputs.py
+
+local_owls_340:
+  - yt/frontends/owls/tests/test_outputs.py
+
+local_pw_340:
+  - yt/visualization/tests/test_plotwindow.py:test_attributes
+  - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+
+local_tipsy_340:
+  - yt/frontends/tipsy/tests/test_outputs.py
+
+local_varia_340:
+  - yt/analysis_modules/radmc3d_export
+  - yt/frontends/moab/tests/test_c5.py
+  - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+  - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+  - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+
+local_orion_340:
+  - yt/frontends/boxlib/tests/test_orion.py
+
+local_ramses_340:
+  - yt/frontends/ramses/tests/test_outputs.py
+
+local_ytdata_340:
+  - yt/frontends/ytdata
\ No newline at end of file

diff -r 47ce79f5d017d9a9352f30c0d1382b7297bbfd5c -r 97d5e68dd39a2a008c8c621f94d253bfa8fb0b5d yt/tests/test_flake8.py
--- a/yt/tests/test_flake8.py
+++ b/yt/tests/test_flake8.py
@@ -8,14 +8,15 @@
 @requires_module('flake8')
 def test_flake8():
     yt_dir = os.path.dirname(os.path.abspath(yt.__file__))
-    initial_dir = os.getcwd()
-    os.chdir(yt_dir)
-    output_file = os.path.sep.join([os.path.dirname(initial_dir), 'flake8.out'])
+    output_file = os.environ.get("WORKSPACE", None) or os.getcwd()
+    output_file = os.path.join(output_file, 'flake8.out')
     if os.path.exists(output_file):
         os.remove(output_file)
     output_string = "--output-file=%s" % output_file
-    subprocess.call(['flake8', output_string, os.curdir])
-    os.chdir(initial_dir)
+    config_string = "--config=%s" % os.path.join(os.path.dirname(yt_dir), 
+                                                 'setup.cfg')
+    subprocess.call(['flake8', output_string, config_string, yt_dir])
+    
     with open(output_file) as f:
         flake8_output = f.readlines()
     if flake8_output != []:

diff -r 47ce79f5d017d9a9352f30c0d1382b7297bbfd5c -r 97d5e68dd39a2a008c8c621f94d253bfa8fb0b5d yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -671,7 +671,11 @@
                             err_msg=err_msg, verbose=True)
 
 def compare_image_lists(new_result, old_result, decimals):
-    fns = ['old.png', 'new.png']
+    fns = []
+    for i in range(2):
+        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+        os.close(tmpfd)
+        fns.append(tmpname)
     num_images = len(old_result)
     assert(num_images > 0)
     for i in range(num_images):
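
The fixed 'old.png'/'new.png' names could collide once answer suites run
in separate processes sharing a working directory; mkstemp sidesteps that
by creating a uniquely named file and returning an OS-level handle, which
is closed immediately so only the name is kept. The pattern in isolation:

    import os
    import tempfile

    fns = []
    for i in range(2):
        # mkstemp creates the file atomically with a unique name ...
        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
        os.close(tmpfd)   # ... close the raw descriptor, keep the name
        fns.append(tmpname)
    print(fns)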

diff -r 47ce79f5d017d9a9352f30c0d1382b7297bbfd5c -r 97d5e68dd39a2a008c8c621f94d253bfa8fb0b5d yt/visualization/tests/test_mesh_slices.py
--- a/yt/visualization/tests/test_mesh_slices.py
+++ b/yt/visualization/tests/test_mesh_slices.py
@@ -32,7 +32,7 @@
     curdir = os.getcwd()
     os.chdir(tmpdir)
 
-    np.random.seed(0451)
+    np.random.seed(0x4d3d3d3)
 
     # tetrahedral ds
     ds = fake_tetrahedral_ds()

Repository URL: https://bitbucket.org/yt_analysis/yt/
