[yt-svn] commit/yt-3.0: 124 new changesets

Bitbucket commits-noreply at bitbucket.org
Fri Nov 9 19:18:58 PST 2012


124 new commits in yt-3.0:


https://bitbucket.org/yt_analysis/yt-3.0/changeset/1e10246f03ee/
changeset:   1e10246f03ee
branch:      yt-3.0
user:        MatthewTurk
date:        2012-08-29 16:14:40
summary:     Initial import of coordinate handler class
affected #:  1 file

diff -r b89b3c70056a0f7c0a19b654aa66211f02f0ff46 -r 1e10246f03ee514c48a3d2b87d12f67aabdb1ca3 yt/geometry/coordinate_handler.py
--- /dev/null
+++ b/yt/geometry/coordinate_handler.py
@@ -0,0 +1,162 @@
+"""
+Coordinate handler base class.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+import abc
+import weakref
+
+from yt.funcs import *
+from yt.data_objects.field_info_container import \
+    NullFunc, FieldInfoContainer
+from yt.utilities.io_handler import io_registry
+from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    ParallelAnalysisInterface, parallel_splitter
+from yt.utilities.lib.misc_utilities import \
+    pixelize_cylinder
+import yt.visualization._MPL as _MPL
+
+class CoordinatesHandler(object):
+    
+    __metaclass__ = abc.ABCMeta
+
+    def __init__(self, pf):
+        self.pf = weakref.proxy(pf)
+
+    @abc.abstractmethod
+    def coordinate_fields(self):
+        # This should return field definitions for x, y, z, r, theta, phi
+        pass
+
+    @abc.abstractmethod
+    def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):
+        # This should *actually* be a pixelize call, not just returning the
+        # pixelizer
+        pass
+
+    def cartesian_length(self, start, end):
+        p1 = self.convert_to_cartesian(start)
+        p2 = self.convert_to_cartesian(end)
+        return np.sqrt(((p1-p2)**2.0).sum())
+
+    @abc.abstractmethod
+    def convert_from_cartesian(self, coord):
+        pass
+
+    @abc.abstractmethod
+    def convert_to_cartesian(self, coord):
+        pass
+
+    @abc.abstractproperty
+    def axis_name(self):
+        pass
+
+    @abc.abstractproperty
+    def axis_id(self):
+        pass
+
+    @abc.abstractproperty
+    def x_axis(self):
+        pass
+
+    @abc.abstractproperty
+    def y_axis(self):
+        pass
+
+    @abc.abstractproperty
+    def period(self):
+        pass
+
+class CartesianCoordinatesHandler(CoordinatesHandler):
+
+    def __init__(self, pf):
+        super(CartesianCoordinatesHandler, self).__init__(pf)
+
+
+    def coordinate_fields(self):
+        pass
+
+    def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):
+        if dimension < 3:
+            return self._ortho_pixelize(data_source, field, bounds, size, antialias)
+        else:
+            return self._oblique_pixelize(data_source, field, bounds, size, antialias)
+
+    def _ortho_pixelize(self, data_source, field, bounds, size, antialias):
+        # We should be using fcoords
+        buff = _MPL.Pixelize(data_source['px'], data_source['py'],
+                             data_source['pdx'], data_source['pdy'],
+                             data_source[field], size[0], size[1],
+                             bounds, int(antialias),
+                             True, self.period).transpose()
+        return buff
+
+    def _oblique_pixelize(self, data_source, field, bounds, size, antialias):
+        buff = _MPL.CPixelize(data_source['x'],   data_source['y'],   data_source['z'],
+                              data_source['px'],  data_source['py'],
+                              data_source['pdx'], data_source['pdy'], data_source['pdz'],
+                              data_source.center, data_source._inv_mat, indices,
+                              data_source[item], size[0], size[1], bounds).transpose()
+        return buff
+
+    def convert_from_cartesian(self, coord):
+        return coord
+
+    def convert_to_cartesian(self, coord):
+        return coord
+
+    @property
+    def axis_name(self):
+        return {
+             0  : 'x',  1  : 'y',  2  : 'z',
+            'x' : 'x', 'y' : 'y', 'z' : 'z',
+            'X' : 'x', 'Y' : 'y', 'Z' : 'z',
+        }
+
+    @property
+    def axis_id(self):
+        return {
+            'x' : 0, 'y' : 1, 'z' : 2,
+             0  : 0,  1  : 1,  2  : 2,
+        }
+
+    @property
+    def x_axis(self):
+        return {
+            'x' : 1, 'y' : 0, 'z' : 0,
+             0  : 1,  1  : 0,  2  : 0,
+        }
+
+    @property
+    def y_axis(self):
+        return {
+            'x' : 2, 'y' : 2, 'z' : 1,
+             0  : 2,  1  : 2,  2  : 1,
+        }
+
+    @property
+    def period(self):
+        return self.pf.domain_width
+
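
The axis_name/axis_id/x_axis/y_axis mappings above are what downstream
plotting code uses to resolve the image-plane axes for a given slice
normal.  A minimal usage sketch (assuming `ch` is a
CartesianCoordinatesHandler instance; the variable names are
hypothetical):

    dim = ch.axis_id['y']                        # -> 1
    xax, yax = ch.x_axis[dim], ch.y_axis[dim]    # -> 0, 2
    print ch.axis_name[xax], ch.axis_name[yax]   # prints: x z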



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d09c7b2d98bf/
changeset:   d09c7b2d98bf
branch:      yt-3.0
user:        MatthewTurk
date:        2012-08-29 17:12:52
summary:     Changing to use dicts by themselves in the class definition, despite their
mutable nature.  Reformatting to look more like Python code.
affected #:  1 file

diff -r 1e10246f03ee514c48a3d2b87d12f67aabdb1ca3 -r d09c7b2d98bfd1b0ec2f51344cb0d58a1a08e018 yt/geometry/coordinate_handler.py
--- a/yt/geometry/coordinate_handler.py
+++ b/yt/geometry/coordinate_handler.py
@@ -2,10 +2,10 @@
 Coordinate handler base class.
 
 Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
+Affiliation: Columbia University
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
 
   This file is part of yt.
 
@@ -127,36 +127,72 @@
     def convert_to_cartesian(self, coord):
         return coord
 
+    # Despite being mutable, we use these here to be clear about how these
+    # are generated and to ensure that they are not re-generated unnecessarily
+    axis_name = { 0  : 'x',  1  : 'y',  2  : 'z',
+                 'x' : 'x', 'y' : 'y', 'z' : 'z',
+                 'X' : 'x', 'Y' : 'y', 'Z' : 'z'}
+
+    axis_id = { 'x' : 0, 'y' : 1, 'z' : 2,
+                 0  : 0,  1  : 1,  2  : 2}
+
+    x_axis = { 'x' : 1, 'y' : 0, 'z' : 0,
+                0  : 1,  1  : 0,  2  : 0}
+
+    y_axis = { 'x' : 2, 'y' : 2, 'z' : 1,
+                0  : 2,  1  : 2,  2  : 1}
+
+    @property
+    def period(self):
+        return self.pf.domain_width
+
+class PolarCoordinatesHandler(CoordinatesHandler):
+
+    def __init__(self, pf, ordering = 'rzt'):
+        if ordering != 'rzt': raise NotImplementedError
+        super(PolarCoordinatesHandler, self).__init__(pf)
+
+    def coordinate_fields(self):
+        # return the fields for r, z, theta
+        pass
+
+    def pixelize(self, dimension):
+        raise NotImplementedError
+        if dimension in (0, 2):
+            return _MPL.Pixelize
+        elif dimension == 2:
+            return pixelize_cylinder
+        elif dimension > 2:
+            raise NotImplementedError
+
     @property
     def axis_name(self):
         return {
-             0  : 'x',  1  : 'y',  2  : 'z',
-            'x' : 'x', 'y' : 'y', 'z' : 'z',
-            'X' : 'x', 'Y' : 'y', 'Z' : 'z',
+             0  : 'r',  1  : 'z',  2  : 'theta',
+            'r' : 'r', 'z' : 'z', 'theta' : 'theta',
+            'R' : 'r', 'Z' : 'z', 'Theta' : 'theta',
         }
 
     @property
     def axis_id(self):
         return {
-            'x' : 0, 'y' : 1, 'z' : 2,
+            'r' : 0, 'z' : 1, 'theta' : 2,
              0  : 0,  1  : 1,  2  : 2,
         }
 
     @property
     def x_axis(self):
         return {
-            'x' : 1, 'y' : 0, 'z' : 0,
+            'r' : 1, 'z' : 0, 'theta' : 0,
              0  : 1,  1  : 0,  2  : 0,
         }
 
     @property
     def y_axis(self):
         return {
-            'x' : 2, 'y' : 2, 'z' : 1,
+            'r' : 2, 'z' : 2, 'theta' : 1,
              0  : 2,  1  : 2,  2  : 1,
         }
 
-    @property
     def period(self):
-        return self.pf.domain_width
-
+        return np.array([0.0, 0.0, 2.0*np.pi])
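
The trade-off the summary alludes to: a property builds a brand-new dict
on every attribute access, whereas a class-level dict is built once at
class-creation time, at the cost of being a shared mutable object.  A
standalone sketch of the difference (hypothetical classes):

    class WithProperty(object):
        @property
        def axis_id(self):
            return {'x': 0, 'y': 1, 'z': 2}   # rebuilt on every access

    class WithClassAttr(object):
        axis_id = {'x': 0, 'y': 1, 'z': 2}    # built exactly once

    a, b = WithProperty(), WithClassAttr()
    assert a.axis_id is not a.axis_id   # fresh object each time
    assert b.axis_id is b.axis_id       # one shared (mutable) object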



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2685d51f91db/
changeset:   2685d51f91db
branch:      yt-3.0
user:        MatthewTurk
date:        2012-08-30 02:08:38
summary:     Taking the first couple of Anthony's suggestions for handling coordinate
transformations.
affected #:  1 file

diff -r d09c7b2d98bfd1b0ec2f51344cb0d58a1a08e018 -r 2685d51f91db52bf30febf44856af894511a158e yt/geometry/coordinate_handler.py
--- a/yt/geometry/coordinate_handler.py
+++ b/yt/geometry/coordinate_handler.py
@@ -40,54 +40,60 @@
 
 class CoordinatesHandler(object):
     
-    __metaclass__ = abc.ABCMeta
-
     def __init__(self, pf):
         self.pf = weakref.proxy(pf)
 
-    @abc.abstractmethod
     def coordinate_fields(self):
         # This should return field definitions for x, y, z, r, theta, phi
-        pass
+        raise NotImplementedError
 
-    @abc.abstractmethod
     def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):
         # This should *actually* be a pixelize call, not just returning the
         # pixelizer
-        pass
+        raise NotImplementedError
 
-    def cartesian_length(self, start, end):
+    def distance(self, start, end):
         p1 = self.convert_to_cartesian(start)
         p2 = self.convert_to_cartesian(end)
         return np.sqrt(((p1-p2)**2.0).sum())
 
-    @abc.abstractmethod
     def convert_from_cartesian(self, coord):
-        pass
+        raise NotImplementedError
 
-    @abc.abstractmethod
     def convert_to_cartesian(self, coord):
-        pass
+        raise NotImplementedError
 
-    @abc.abstractproperty
+    def convert_to_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_to_spherical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_spherical(self, coord):
+        raise NotImplementedError
+
+    @property
     def axis_name(self):
-        pass
+        raise NotImplementedError
 
-    @abc.abstractproperty
+    @property
     def axis_id(self):
-        pass
+        raise NotImplementedError
 
-    @abc.abstractproperty
+    @property
     def x_axis(self):
-        pass
+        raise NotImplementedError
 
-    @abc.abstractproperty
+    @property
     def y_axis(self):
-        pass
+        raise NotImplementedError
 
-    @abc.abstractproperty
+    @property
     def period(self):
-        pass
+        raise NotImplementedError
 
 class CartesianCoordinatesHandler(CoordinatesHandler):
 
@@ -165,34 +171,19 @@
         elif dimension > 2:
             raise NotImplementedError
 
-    @property
-    def axis_name(self):
-        return {
-             0  : 'r',  1  : 'z',  2  : 'theta',
-            'r' : 'r', 'z' : 'z', 'theta' : 'theta',
-            'R' : 'r', 'Z' : 'z', 'Theta' : 'theta',
-        }
+    axis_name = { 0  : 'r',  1  : 'z',  2  : 'theta',
+                 'r' : 'r', 'z' : 'z', 'theta' : 'theta',
+                 'R' : 'r', 'Z' : 'z', 'Theta' : 'theta'}
+
+    axis_id = { 'r' : 0, 'z' : 1, 'theta' : 2,
+                 0  : 0,  1  : 1,  2  : 2}
+
+    x_axis = { 'r' : 1, 'z' : 0, 'theta' : 0,
+                0  : 1,  1  : 0,  2  : 0}
+
+    y_axis = { 'r' : 2, 'z' : 2, 'theta' : 1,
+                0  : 2,  1  : 2,  2  : 1}
 
     @property
-    def axis_id(self):
-        return {
-            'r' : 0, 'z' : 1, 'theta' : 2,
-             0  : 0,  1  : 1,  2  : 2,
-        }
-
-    @property
-    def x_axis(self):
-        return {
-            'r' : 1, 'z' : 0, 'theta' : 0,
-             0  : 1,  1  : 0,  2  : 0,
-        }
-
-    @property
-    def y_axis(self):
-        return {
-            'r' : 2, 'z' : 2, 'theta' : 1,
-             0  : 2,  1  : 2,  2  : 1,
-        }
-
     def period(self):
         return np.array([0.0, 0.0, 2.0*np.pi])



https://bitbucket.org/yt_analysis/yt-3.0/changeset/01c2bacad29b/
changeset:   01c2bacad29b
branch:      yt-3.0
user:        MatthewTurk
date:        2012-08-30 13:21:50
summary:     Adding a few pairwise conversions to coordinate handlers.
affected #:  1 file

diff -r 2685d51f91db52bf30febf44856af894511a158e -r 01c2bacad29b20e81e034ae608139e931bafe58a yt/geometry/coordinate_handler.py
--- a/yt/geometry/coordinate_handler.py
+++ b/yt/geometry/coordinate_handler.py
@@ -95,6 +95,22 @@
     def period(self):
         raise NotImplementedError
 
+def cartesian_to_cylindrical(coord, center = (0,0,0)):
+    c2 = np.zeros_like(coord)
+    c2[...,0] = ((coord[...,0] - center[0])**2.0
+              +  (coord[...,1] - center[1])**2.0)**0.5
+    c2[...,1] = coord[...,2] # rzt
+    c2[...,2] = np.arctan2(coord[...,1] - center[1],
+                           coord[...,0] - center[0])
+    return c2
+
+def cylindrical_to_cartesian(coord, center = (0,0,0)):
+    c2 = np.zeros_like(coord)
+    c2[...,0] = np.cos(coord[...,2]) * coord[...,0] + center[0] # x = r*cos(theta)
+    c2[...,1] = np.sin(coord[...,2]) * coord[...,0] + center[1] # y = r*sin(theta)
+    c2[...,2] = coord[...,1] # z, per the rzt ordering
+    return c2
+
 class CartesianCoordinatesHandler(CoordinatesHandler):
 
     def __init__(self, pf):
@@ -120,9 +136,11 @@
         return buff
 
     def _oblique_pixelize(self, data_source, field, bounds, size, antialias):
-        buff = _MPL.CPixelize(data_source['x'],   data_source['y'],   data_source['z'],
-                              data_source['px'],  data_source['py'],
-                              data_source['pdx'], data_source['pdy'], data_source['pdz'],
+        indices = np.argsort(data_source['dx'])[::-1]
+        buff = _MPL.CPixelize(data_source['x'], data_source['y'],
+                              data_source['z'], data_source['px'],
+                              data_source['py'], data_source['pdx'],
+                              data_source['pdy'], data_source['pdz'],
                               data_source.center, data_source._inv_mat, indices,
                               data_source[item], size[0], size[1], bounds).transpose()
         return buff
@@ -133,6 +151,20 @@
     def convert_to_cartesian(self, coord):
         return coord
 
+    def convert_to_cylindrical(self, coord):
+        center = self.pf.domain_center
+        return cartesian_to_cylindrical(coord, center)
+
+    def convert_from_cylindrical(self, coord):
+        center = self.pf.domain_center
+        return cylindrical_to_cartesian(coord, center)
+
+    def convert_to_spherical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_spherical(self, coord):
+        raise NotImplementedError
+
+    # Despite being mutable, we use these here to be clear about how these
+    # are generated and to ensure that they are not re-generated unnecessarily
     axis_name = { 0  : 'x',  1  : 'y',  2  : 'z',
@@ -184,6 +216,25 @@
     y_axis = { 'r' : 2, 'z' : 2, 'theta' : 1,
                 0  : 2,  1  : 2,  2  : 1}
 
+    def convert_from_cartesian(self, coord):
+        return cartesian_to_cylindrical(coord)
+
+    def convert_to_cartesian(self, coord):
+        return cylindrical_to_cartesian(coord, center)
+
+    def convert_to_cylindrical(self, coord):
+        return coord
+
+    def convert_from_cylindrical(self, coord):
+        return coord
+
+    def convert_to_spherical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_spherical(self, coord):
+        raise NotImplementedError
+
     @property
     def period(self):
         return np.array([0.0, 0.0, 2.0*np.pi])
+
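
As a sanity check on the rzt ordering used by the two converters above, a
quick round trip (a sketch, not part of the commit):

    import numpy as np
    p = np.array([[3.0, 4.0, 5.0]])     # cartesian (x, y, z)
    cyl = cartesian_to_cylindrical(p)   # rzt: r = 5.0, z = 5.0, theta = arctan2(4, 3)
    assert np.allclose(cylindrical_to_cartesian(cyl), p)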



https://bitbucket.org/yt_analysis/yt-3.0/changeset/e3f22fd4ecbb/
changeset:   e3f22fd4ecbb
branch:      yt-3.0
user:        MatthewTurk
date:        2012-08-30 17:07:13
summary:     Fleshing out the cylindrical coordinate handler
affected #:  1 file

diff -r 01c2bacad29b20e81e034ae608139e931bafe58a -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 yt/geometry/coordinate_handler.py
--- a/yt/geometry/coordinate_handler.py
+++ b/yt/geometry/coordinate_handler.py
@@ -194,15 +194,34 @@
         # return the fields for r, z, theta
         pass
 
-    def pixelize(self, dimension):
+    def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):
         raise NotImplementedError
-        if dimension in (0, 2):
-            return _MPL.Pixelize
+        if dimension == 1:
+            return self._ortho_pixelize(data_source, field, bounds, size,
+                                        antialias)
         elif dimension == 2:
-            return pixelize_cylinder
-        elif dimension > 2:
+            return self._polar_pixelize(data_source, field, bounds, size,
+                                        antialias)
+        else:
+            # Pixelizing along a cylindrical surface is a bit tricky
             raise NotImplementedError
 
+    def _ortho_pixelize(self, data_source, field, bounds, size, antialias):
+        buff = _MPL.Pixelize(data_source['px'], data_source['py'],
+                             data_source['pdx'], data_source['pdy'],
+                             data_source[field], size[0], size[1],
+                             bounds, int(antialias),
+                             True, self.period).transpose()
+        return buff
+
+    def _polar_pixelize(self, data_source, field, bounds, size, antialias):
+        buff = pixelize_cylinder(data_source['r'],
+                                 data_source['dr']/2.0,
+                                 data_source['theta'],
+                                 data_source['dtheta']/2.0,
+                                 size[0], field, bounds[0])
+        return buff
+
     axis_name = { 0  : 'r',  1  : 'z',  2  : 'theta',
                  'r' : 'r', 'z' : 'z', 'theta' : 'theta',
                  'R' : 'r', 'Z' : 'z', 'Theta' : 'theta'}
@@ -220,7 +239,7 @@
         return cartesian_to_cylindrical(coord)
 
     def convert_to_cartesian(self, coord):
-        return cylindrical_to_cartesian(coord, center)
+        return cylindrical_to_cartesian(coord)
 
     def convert_to_cylindrical(self, coord):
         return coord



https://bitbucket.org/yt_analysis/yt-3.0/changeset/e25156a7dfa0/
changeset:   e25156a7dfa0
branch:      yt
user:        MatthewTurk
date:        2012-10-15 06:37:52
summary:     Adding the initial nose plugin for answer testing.
affected #:  3 files

diff -r c33844a7aa055486cbcf1a7f0176c60bb5e2fc0b -r e25156a7dfa013a4f60cfe3378c03fe0157de326 setup.py
--- a/setup.py
+++ b/setup.py
@@ -154,7 +154,11 @@
             'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
                             'yt = yt.utilities.command_line:run_main',
-                       ]},
+                      ],
+                      'nose.plugins.0.10': [
+                            'answer-testing = yt.utilities.answer_testing.api:AnswerTesting'
+                      ]
+        },
         author="Matthew J. Turk",
        author_email="matthewturk@gmail.com",
         url="http://yt-project.org/",


diff -r c33844a7aa055486cbcf1a7f0176c60bb5e2fc0b -r e25156a7dfa013a4f60cfe3378c03fe0157de326 yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -57,3 +57,8 @@
     TestBooleanANDParticleQuantity, \
     TestBooleanORParticleQuantity, \
     TestBooleanNOTParticleQuantity
+
+try:
+    from .framework import AnswerTesting
+except ImportError:
+    raise


diff -r c33844a7aa055486cbcf1a7f0176c60bb5e2fc0b -r e25156a7dfa013a4f60cfe3378c03fe0157de326 yt/utilities/answer_testing/framework.py
--- /dev/null
+++ b/yt/utilities/answer_testing/framework.py
@@ -0,0 +1,65 @@
+"""
+Answer Testing using Nose as a starting point
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import time
+import logging
+import os
+
+from yt.testing import *
+from yt.utilities.command_line import get_yt_version
+from yt.config import ytcfg
+from nose.plugins import Plugin
+
+log = logging.getLogger('nose.plugins.answer-testing')
+
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        test_storage_directory = ytcfg.get("yt", "test_storage_dir")
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        parser.add_option("--answer-parameter-file", dest="parameter_file",
+            default=os.path.join(os.getcwd(), "tests/DD0010/moving7_0010"),
+            help="The parameter file value to feed to 'load' to test against")
+        parser.add_option("--answer-output", dest="storage_dir",
+            default=test_storage_directory,
+            help="Base directory for storing test output.")
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=None,
+            help="The name against which we will compare")
+        parser.add_option("--answer-name", dest="this_name",
+            default=my_hash,
+            help="The name we'll call this set of tests")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+
+    def finalize(self, result):
+        pass
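
Once installed, nose discovers the plugin through the 'nose.plugins.0.10'
entry point registered in setup.py, and it is switched on with the
--with-<name> flag that nose derives from the plugin's name attribute,
e.g.:

    nosetests --with-answer-testing --answer-name=some-name yt

(the --answer-* options being the ones defined in options() above).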



https://bitbucket.org/yt_analysis/yt-3.0/changeset/ad59029089d4/
changeset:   ad59029089d4
branch:      yt
user:        MatthewTurk
date:        2012-10-15 07:11:44
summary:     Fleshing out the new answer testing plugin a bit more, including
some loading of tests from storage.
affected #:  1 file

diff -r e25156a7dfa013a4f60cfe3378c03fe0157de326 -r ad59029089d41fd2269d9d6ca5fdeb6d96b55aae yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -25,6 +25,7 @@
 
 import logging
 import os
+import shelve
 
 from yt.testing import *
 from yt.utilities.command_line import get_yt_version
@@ -60,6 +61,32 @@
         super(AnswerTesting, self).configure(options, conf)
         if not self.enabled:
             return
+        AnswerTestingTest.result_storage = shelve.Shelf(
+            os.path.join(options.storage_dir,
+                         options.this_name))
+        if options.compare_name is not None:
+            AnswerTestingTest.reference_storage = shelve.Shelf(
+                os.path.join(options.storage_dir,
+                            options.compare_name))
 
     def finalize(self, result):
         pass
+
+class AnswerTestingTest(object):
+    reference_storage = None
+
+    description = None
+    def __init__(self, name, pf_fn):
+        self.pf = load(pf_fn)
+        self.name = "%s_%s" % (pf, name)
+
+    def __call__(self):
+        if self.reference_storage is not None:
+            ov = self.reference_storage.get(self.name, None)
+        else:
+            ov = None
+        nv = self.run()
+        return self.compare(nv, ov)
+
+    def compare(self, new_result, old_result):
+        pass



https://bitbucket.org/yt_analysis/yt-3.0/changeset/938a408e6867/
changeset:   938a408e6867
branch:      yt
user:        MatthewTurk
date:        2012-10-16 08:59:45
summary:     Porting over a bunch of the methods for testing individual data outputs.  Added
a first set of Enzo tests.  Still DIY.
affected #:  3 files

diff -r ad59029089d41fd2269d9d6ca5fdeb6d96b55aae -r 938a408e686781812245aa094fbafda19260edcd yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -54,6 +54,7 @@
     pasteboard_repo = '',
     reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
+    data_storage_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',


diff -r ad59029089d41fd2269d9d6ca5fdeb6d96b55aae -r 938a408e686781812245aa094fbafda19260edcd yt/frontends/enzo/tests/test_moving7.py
--- /dev/null
+++ b/yt/frontends/enzo/tests/test_moving7.py
@@ -0,0 +1,34 @@
+"""
+Enzo frontend tests using moving7
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import assert_fields
+from yt.frontends.enzo.api import EnzoStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude")
+
+def test_moving7():
+    for k in assert_fields("DD0010/moving7_0010", _fields):
+        yield k


diff -r ad59029089d41fd2269d9d6ca5fdeb6d96b55aae -r 938a408e686781812245aa094fbafda19260edcd yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -26,11 +26,14 @@
 import logging
 import os
 import shelve
+import hashlib
+import contextlib
 
 from yt.testing import *
 from yt.utilities.command_line import get_yt_version
 from yt.config import ytcfg
 from nose.plugins import Plugin
+from yt.mods import *
 
 log = logging.getLogger('nose.plugins.answer-testing')
 
@@ -61,32 +64,176 @@
         super(AnswerTesting, self).configure(options, conf)
         if not self.enabled:
             return
-        AnswerTestingTest.result_storage = shelve.Shelf(
+        AnswerTestingTest.result_storage = shelve.open(
             os.path.join(options.storage_dir,
                          options.this_name))
         if options.compare_name is not None:
-            AnswerTestingTest.reference_storage = shelve.Shelf(
+            AnswerTestingTest.reference_storage = shelve.open(
                 os.path.join(options.storage_dir,
                             options.compare_name))
 
     def finalize(self, result):
         pass
 
+@contextlib.contextmanager
+def temp_cwd(cwd):
+    oldcwd = os.getcwd()
+    os.chdir(cwd)
+    yield
+    os.chdir(oldcwd)
+
 class AnswerTestingTest(object):
     reference_storage = None
 
     description = None
     def __init__(self, name, pf_fn):
-        self.pf = load(pf_fn)
-        self.name = "%s_%s" % (pf, name)
+        path = ytcfg.get("yt", "data_storage_dir")
+        with temp_cwd(path):
+            self.pf = load(pf_fn)
+            self.pf.h
+        self.name = "%s_%s" % (self.pf, name)
 
     def __call__(self):
+        nv = self.run()
+        self.result_storage[self.name] = nv
         if self.reference_storage is not None:
             ov = self.reference_storage.get(self.name, None)
+            return self.compare(nv, ov)
         else:
             ov = None
-        nv = self.run()
-        return self.compare(nv, ov)
+            return True
 
     def compare(self, new_result, old_result):
-        pass
+        raise RuntimeError
+
+    def create_obj(self, pf, obj_type):
+        # obj_type should be tuple of
+        #  ( obj_name, ( args ) )
+        if obj_type is None:
+            return pf.h.all_data()
+        cls = getattr(pf.h, obj_type[0])
+        obj = cls(*obj_type[1])
+        return obj
+
+    @property
+    def sim_center(self):
+        """
+        This returns the center of the domain.
+        """
+        return 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+
+    @property
+    def max_dens_location(self):
+        """
+        This is a helper function to return the location of the most dense
+        point.
+        """
+        return self.pf.h.find_max("Density")[1]
+
+    @property
+    def entire_simulation(self):
+        """
+        Return an unsorted array of values that cover the entire domain.
+        """
+        return self.pf.h.all_data()
+        
+class FieldValuesTest(AnswerTestingTest):
+
+    def __init__(self, name, pf_fn, field, obj_type = None):
+        super(FieldValuesTest, self).__init__(name, pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        return obj[self.field].sort()
+
+    def compare(self, new_result, old_result):
+        assert_equal(new_result, old_result)
+
+def _try_load(pf_fn):
+    try:
+        load(pf_fn)
+    except:
+        return False
+    return True
+
+def assert_fields(pf_fn, fields, data_obj = None):
+    if AnswerTestingTest.result_storage is None: return 
+    for field in fields:
+        yield FieldValuesTest("FieldValues_%s" % field, pf_fn,
+                              field, data_obj)
+
+class ProjectionValuesTest(AnswerTestingTest):
+    def __init__(self, name, pf_fn, axis, field,
+                 weight_field = None, data_source = None):
+        super(ProjectionValuesTest, self).__init__(name, pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.data_source = None
+
+    def run(self):
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field)
+        return proj
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result.field_data) == len(old_result.field_data))
+        for k in new_result.field_data:
+            assert (k in old_result.field_data)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class GridValuesTest(AnswerTestingTest):
+    def __init__(self, name, pf_fn, field):
+        super(GridValuesTest, self).__init__(name, pf_fn)
+
+    def run(self):
+        hashes = {}
+        for g in self.pf.h.grids:
+            hashes[g.id] = hashlib.md5(g[self.field].tostring()).hexdigest()
+            g.clear_data()
+        return hashes
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class TestGridHierarchy(AnswerTestingTest):
+    def run(self):
+        result = {}
+        result["grid_dimensions"] = self.pf.h.grid_dimensions
+        result["grid_left_edges"] = self.pf.h.grid_left_edge
+        result["grid_right_edges"] = self.pf.h.grid_right_edge
+        result["grid_levels"] = self.pf.h.grid_levels
+        result["grid_particle_count"] = self.pf.h.grid_particle_count
+
+    def compare(self, new_result, old_result):
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class TestParentageRelationships(AnswerTestingTest):
+    def run(self):
+        result = {}
+        result["parents"] = []
+        result["children"] = []
+        for g in self.pf.h.grids:
+            p = g.Parent
+            if p is None:
+                result["parents"].append(None)
+            elif hasattr(p, "id"):
+                result["parents"].append(p.id)
+            else:
+                result["parents"].append([pg.id for pg in p])
+            result["children"].append([c.id for c in g.Children])
+        return result
+
+    def compare(self, new_result, old_result):
+        for newp, oldp in zip(new_result["parents"], old_result["parents"]):
+            assert(newp == oldp)
+        for newc, oldc in zip(new_result["children"], old_result["children"]):
+            assert(newc == oldc)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/bac81d5a63a2/
changeset:   bac81d5a63a2
branch:      yt
user:        MatthewTurk
date:        2012-10-16 09:17:20
summary:     Answer testing preliminarily works for a particular Enzo output.
affected #:  2 files

diff -r 938a408e686781812245aa094fbafda19260edcd -r bac81d5a63a2e7d3f797d1bb7d769dc7fec52443 yt/frontends/enzo/tests/test_moving7.py
--- a/yt/frontends/enzo/tests/test_moving7.py
+++ b/yt/frontends/enzo/tests/test_moving7.py
@@ -24,11 +24,24 @@
 """
 
 from yt.testing import *
-from yt.utilities.answer_testing.framework import assert_fields
+from yt.utilities.answer_testing.framework import \
+    can_run_pf, ProjectionValuesTest, FieldValuesTest
 from yt.frontends.enzo.api import EnzoStaticOutput
 
-_fields = ("Temperature", "Density", "VelocityMagnitude")
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
+           "particle_density")
+
+pf_fn = "DD0010/moving7_0010"
 
 def test_moving7():
-    for k in assert_fields("DD0010/moving7_0010", _fields):
-        yield k
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    for field in _fields:
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)


diff -r 938a408e686781812245aa094fbafda19260edcd -r bac81d5a63a2e7d3f797d1bb7d769dc7fec52443 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -32,6 +32,8 @@
 from yt.testing import *
 from yt.utilities.command_line import get_yt_version
 from yt.config import ytcfg
+from yt.utilities.logger import \
+    disable_stream_logging
 from nose.plugins import Plugin
 from yt.mods import *
 
@@ -64,6 +66,9 @@
         super(AnswerTesting, self).configure(options, conf)
         if not self.enabled:
             return
+        disable_stream_logging()
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = shelve.open(
             os.path.join(options.storage_dir,
                          options.this_name))
@@ -139,34 +144,32 @@
         
 class FieldValuesTest(AnswerTestingTest):
 
-    def __init__(self, name, pf_fn, field, obj_type = None):
+    def __init__(self, pf_fn, field, obj_type = None):
+        name = "%s_%s" % (pf_fn, field)
         super(FieldValuesTest, self).__init__(name, pf_fn)
         self.obj_type = obj_type
         self.field = field
 
     def run(self):
         obj = self.create_obj(self.pf, self.obj_type)
-        return obj[self.field].sort()
+        return np.sort(obj[self.field])
 
     def compare(self, new_result, old_result):
         assert_equal(new_result, old_result)
 
-def _try_load(pf_fn):
-    try:
-        load(pf_fn)
-    except:
-        return False
-    return True
-
-def assert_fields(pf_fn, fields, data_obj = None):
-    if AnswerTestingTest.result_storage is None: return 
-    for field in fields:
-        yield FieldValuesTest("FieldValues_%s" % field, pf_fn,
-                              field, data_obj)
+def can_run_pf(pf_fn):
+    path = ytcfg.get("yt", "data_storage_dir")
+    with temp_cwd(path):
+        try:
+            load(pf_fn)
+        except:
+            return False
+    return AnswerTestingTest.result_storage is not None
 
 class ProjectionValuesTest(AnswerTestingTest):
-    def __init__(self, name, pf_fn, axis, field,
+    def __init__(self, pf_fn, axis, field,
                  weight_field = None, data_source = None):
+        name = "%s_%s_%s_%s" % (pf_fn, axis, field, weight_field)
         super(ProjectionValuesTest, self).__init__(name, pf_fn)
         self.axis = axis
         self.field = field



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d734e554f99e/
changeset:   d734e554f99e
branch:      yt
user:        MatthewTurk
date:        2012-10-18 00:10:16
summary:     Storing results in S3 now seems to work.
affected #:  2 files

diff -r bac81d5a63a2e7d3f797d1bb7d769dc7fec52443 -r d734e554f99ed498076525aea7204ea1994acb9a yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -54,7 +54,7 @@
     pasteboard_repo = '',
     reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
-    data_storage_dir = '/does/not/exist',
+    test_data_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',


diff -r bac81d5a63a2e7d3f797d1bb7d769dc7fec52443 -r d734e554f99ed498076525aea7204ea1994acb9a yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -25,9 +25,9 @@
 
 import logging
 import os
-import shelve
 import hashlib
 import contextlib
+import urllib2
+from collections import defaultdict
 
 from yt.testing import *
 from yt.utilities.command_line import get_yt_version
@@ -36,31 +36,29 @@
     disable_stream_logging
 from nose.plugins import Plugin
 from yt.mods import *
+import cPickle
 
-log = logging.getLogger('nose.plugins.answer-testing')
+mylog = logging.getLogger('nose.plugins.answer-testing')
+
+_latest = "SomeValue"
+_url_path = "http://yt_answer_tests.s3-website-us-east-1.amazonaws.com/%s"
 
 class AnswerTesting(Plugin):
     name = "answer-testing"
 
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
-        test_storage_directory = ytcfg.get("yt", "test_storage_dir")
         try:
             my_hash = get_yt_version()
         except:
             my_hash = "UNKNOWN%s" % (time.time())
-        parser.add_option("--answer-parameter-file", dest="parameter_file",
-            default=os.path.join(os.getcwd(), "tests/DD0010/moving7_0010"),
-            help="The parameter file value to feed to 'load' to test against")
-        parser.add_option("--answer-output", dest="storage_dir",
-            default=test_storage_directory,
-            help="Base directory for storing test output.")
         parser.add_option("--answer-compare", dest="compare_name",
-            default=None,
-            help="The name against which we will compare")
+            default=None, help="The name against which we will compare")
         parser.add_option("--answer-name", dest="this_name",
             default=my_hash,
             help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
 
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
@@ -69,16 +67,31 @@
         disable_stream_logging()
         from yt.config import ytcfg
         ytcfg["yt","__withintesting"] = "True"
-        AnswerTestingTest.result_storage = shelve.open(
-            os.path.join(options.storage_dir,
-                         options.this_name))
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
         if options.compare_name is not None:
-            AnswerTestingTest.reference_storage = shelve.open(
-                os.path.join(options.storage_dir,
-                            options.compare_name))
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            #AnswerTestingTest.reference_storage = urllib2
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
 
     def finalize(self, result):
-        pass
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt_answer_tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
 
 @contextlib.contextmanager
 def temp_cwd(cwd):
@@ -92,7 +105,7 @@
 
     description = None
     def __init__(self, name, pf_fn):
-        path = ytcfg.get("yt", "data_storage_dir")
+        path = ytcfg.get("yt", "test_data_dir")
         with temp_cwd(path):
             self.pf = load(pf_fn)
             self.pf.h
@@ -100,7 +113,7 @@
 
     def __call__(self):
         nv = self.run()
-        self.result_storage[self.name] = nv
+        self.result_storage[str(self.pf)][self.name] = nv
         if self.reference_storage is not None:
             ov = self.reference_storage.get(self.name, None)
             return self.compare(nv, ov)
@@ -158,7 +171,7 @@
         assert_equal(new_result, old_result)
 
 def can_run_pf(pf_fn):
-    path = ytcfg.get("yt", "data_storage_dir")
+    path = ytcfg.get("yt", "test_data_dir")
     with temp_cwd(path):
         try:
             load(pf_fn)
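
The in-memory layout is now two-level: results are keyed first by dataset
name and then by test name, and finalize() pickles each dataset's dict to
its own S3 key.  Schematically (a sketch reusing names from the commit):

    from collections import defaultdict
    import cPickle

    result_storage = defaultdict(dict)
    result_storage["moving7_0010"]["SomeTest_moving7_0010"] = [1.0, 2.0]
    blob = cPickle.dumps(result_storage["moving7_0010"])
    key_name = "%s_%s" % ("my-answer-name", "moving7_0010")
    # finalize() uploads blob under key_name in the yt_answer_tests bucket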



https://bitbucket.org/yt_analysis/yt-3.0/changeset/af435ca59ecc/
changeset:   af435ca59ecc
branch:      yt
user:        MatthewTurk
date:        2012-10-18 03:59:12
summary:     Answer testing can now compare against reference values.
affected #:  2 files

diff -r d734e554f99ed498076525aea7204ea1994acb9a -r af435ca59ecccd46efe580b37a9d805ab32177e8 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -41,7 +41,27 @@
 mylog = logging.getLogger('nose.plugins.answer-testing')
 
 _latest = "SomeValue"
-_url_path = "http://yt_answer_tests.s3-website-us-east-1.amazonaws.com/%s"
+_url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
+
+class AnswerTestOpener(object):
+    def __init__(self, reference_name):
+        self.reference_name = reference_name
+        self.cache = {}
+
+    def get(self, pf_name, default = None):
+        if pf_name in self.cache: return self.cache[pf_name]
+        url = _url_path % (self.reference_name, pf_name)
+        try:
+            resp = urllib2.urlopen(url)
+            # This is dangerous, but we have a controlled S3 environment
+            data = resp.read()
+            rv = cPickle.loads(data)
+        except urllib2.HTTPError as ex:
+            mylog.warning("Missing %s (%s)", url, ex)
+            raise YTNoOldAnswer(url)
+        self.cache[pf_name] = rv
+        return rv
 
 class AnswerTesting(Plugin):
     name = "answer-testing"
@@ -73,7 +93,8 @@
             # Now we grab from our S3 store
             if options.compare_name == "latest":
                 options.compare_name = _latest
-            #AnswerTestingTest.reference_storage = urllib2
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
         self.answer_name = options.this_name
         self.store_results = options.store_results
 
@@ -84,7 +105,7 @@
         import boto
         from boto.s3.key import Key
         c = boto.connect_s3()
-        bucket = c.get_bucket("yt_answer_tests")
+        bucket = c.get_bucket("yt-answer-tests")
         for pf_name in self.result_storage:
             rs = cPickle.dumps(self.result_storage[pf_name])
             tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
@@ -92,6 +113,7 @@
             k = Key(bucket)
             k.key = "%s_%s" % (self.answer_name, pf_name)
             k.set_contents_from_string(rs)
+            k.set_acl("public-read")
 
 @contextlib.contextmanager
 def temp_cwd(cwd):
@@ -100,22 +122,32 @@
     yield
     os.chdir(oldcwd)
 
+def can_run_pf(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        try:
+            load(pf_fn)
+        except:
+            return False
+    return AnswerTestingTest.result_storage is not None
+
 class AnswerTestingTest(object):
     reference_storage = None
 
     description = None
-    def __init__(self, name, pf_fn):
+    def __init__(self, pf_fn):
         path = ytcfg.get("yt", "test_data_dir")
         with temp_cwd(path):
             self.pf = load(pf_fn)
             self.pf.h
-        self.name = "%s_%s" % (self.pf, name)
 
     def __call__(self):
         nv = self.run()
         self.result_storage[str(self.pf)][self.name] = nv
         if self.reference_storage is not None:
-            ov = self.reference_storage.get(self.name, None)
+            dd = self.reference_storage.get(str(self.pf))
+            if dd is None: raise YTNoOldAnswer(str(self.pf))
+            ov = dd[self.name]
             return self.compare(nv, ov)
         else:
             ov = None
@@ -154,56 +186,69 @@
         Return an unsorted array of values that cover the entire domain.
         """
         return self.pf.h.all_data()
+
+    @property
+    def name(self):
+        obj_type = getattr(self, "obj_type", None)
+        if obj_type is None:
+            oname = "all"
+        else:
+            oname = "_".join((str(s) for s in obj_type))
+        args = [self._type_name, str(self.pf), oname]
+        args += [str(getattr(self, an)) for an in self._attrs]
+        return "_".join(args)
         
 class FieldValuesTest(AnswerTestingTest):
+    _type_name = "FieldValues"
+    _attrs = ("field", )
 
     def __init__(self, pf_fn, field, obj_type = None):
-        name = "%s_%s" % (pf_fn, field)
-        super(FieldValuesTest, self).__init__(name, pf_fn)
+        super(FieldValuesTest, self).__init__(pf_fn)
         self.obj_type = obj_type
         self.field = field
 
     def run(self):
         obj = self.create_obj(self.pf, self.obj_type)
-        return np.sort(obj[self.field])
+        return obj[self.field]
 
     def compare(self, new_result, old_result):
         assert_equal(new_result, old_result)
 
-def can_run_pf(pf_fn):
-    path = ytcfg.get("yt", "test_data_dir")
-    with temp_cwd(path):
-        try:
-            load(pf_fn)
-        except:
-            return False
-    return AnswerTestingTest.result_storage is not None
+class ProjectionValuesTest(AnswerTestingTest):
+    _type_name = "ProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
 
-class ProjectionValuesTest(AnswerTestingTest):
-    def __init__(self, pf_fn, axis, field,
-                 weight_field = None, data_source = None):
-        name = "%s_%s_%s_%s" % (pf_fn, axis, field, weight_field)
-        super(ProjectionValuesTest, self).__init__(name, pf_fn)
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(ProjectionValuesTest, self).__init__(pf_fn)
         self.axis = axis
         self.field = field
         self.weight_field = weight_field
-        self.data_source = None
+        self.obj_type = obj_type
 
     def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
         proj = self.pf.h.proj(self.axis, self.field,
-                              weight_field=self.weight_field)
-        return proj
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        return proj.field_data
 
     def compare(self, new_result, old_result):
-        assert(len(new_result.field_data) == len(old_result.field_data))
-        for k in new_result.field_data:
-            assert (k in old_result.field_data)
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
 class GridValuesTest(AnswerTestingTest):
+    _type_name = "GridValues"
+    _attrs = ("field",)
+
     def __init__(self, name, pf_fn, field):
-        super(GridValuesTest, self).__init__(name, pf_fn)
+        super(GridValuesTest, self).__init__(pf_fn)
 
     def run(self):
         hashes = {}
@@ -220,6 +265,9 @@
             assert_equal(new_result[k], old_result[k])
 
 class TestGridHierarchy(AnswerTestingTest):
+    _type_name = "GridHierarchy"
+    _attrs = ()
+
     def run(self):
         result = {}
         result["grid_dimensions"] = self.pf.h.grid_dimensions
@@ -233,6 +281,8 @@
             assert_equal(new_result[k], old_result[k])
 
 class TestParentageRelationships(AnswerTestingTest):
+    _type_name = "ParentageRelationships"
+    _attrs = ()
     def run(self):
         result = {}
         result["parents"] = []


diff -r d734e554f99ed498076525aea7204ea1994acb9a -r af435ca59ecccd46efe580b37a9d805ab32177e8 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -146,3 +146,11 @@
     def __str__(self):
         return "You must create an API key before uploading.  See " + \
                "https://data.yt-project.org/getting_started.html"
+
+class YTNoOldAnswer(YTException):
+    def __init__(self, path):
+        self.path = path
+
+    def __str__(self):
+        return "There is no old answer available.\n" + \
+               str(self.path)
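
With the new name property, test names are generated from the type name,
dataset, data object, and attributes instead of being passed in.  For
example (a sketch, assuming str(pf) evaluates to 'moving7_0010'):

    t = FieldValuesTest("DD0010/moving7_0010", "Density",
                        obj_type=("sphere", ("max", (0.1, 'unitary'))))
    print t.name
    # FieldValues_moving7_0010_sphere_('max', (0.1, 'unitary'))_Density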



https://bitbucket.org/yt_analysis/yt-3.0/changeset/8e2e8fe6ac49/
changeset:   8e2e8fe6ac49
branch:      yt
user:        MatthewTurk
date:        2012-10-18 04:39:24
summary:     Fixing up tests of grid hierarchy, etc., for moving7.  Related to YT-2.
affected #:  2 files

diff -r af435ca59ecccd46efe580b37a9d805ab32177e8 -r 8e2e8fe6ac49dea863dc4f40dbd1f49a066a9af3 yt/frontends/enzo/tests/test_moving7.py
--- a/yt/frontends/enzo/tests/test_moving7.py
+++ b/yt/frontends/enzo/tests/test_moving7.py
@@ -25,7 +25,9 @@
 
 from yt.testing import *
 from yt.utilities.answer_testing.framework import \
-    can_run_pf, ProjectionValuesTest, FieldValuesTest
+    can_run_pf, ProjectionValuesTest, FieldValuesTest, \
+    GridHierarchyTest, ParentageRelationshipsTest, \
+    GridValuesTest
 from yt.frontends.enzo.api import EnzoStaticOutput
 
 _fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
@@ -36,7 +38,10 @@
 def test_moving7():
     if not can_run_pf(pf_fn): return
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
     for field in _fields:
+        yield GridValuesTest(pf_fn, field)
         for axis in [0, 1, 2]:
             for ds in dso:
                 for weight_field in [None, "Density"]:


diff -r af435ca59ecccd46efe580b37a9d805ab32177e8 -r 8e2e8fe6ac49dea863dc4f40dbd1f49a066a9af3 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -247,8 +247,9 @@
     _type_name = "GridValues"
     _attrs = ("field",)
 
-    def __init__(self, name, pf_fn, field):
+    def __init__(self, pf_fn, field):
         super(GridValuesTest, self).__init__(pf_fn)
+        self.field = field
 
     def run(self):
         hashes = {}
@@ -264,7 +265,7 @@
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
-class TestGridHierarchy(AnswerTestingTest):
+class GridHierarchyTest(AnswerTestingTest):
     _type_name = "GridHierarchy"
     _attrs = ()
 
@@ -280,7 +281,7 @@
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
-class TestParentageRelationships(AnswerTestingTest):
+class ParentageRelationshipsTest(AnswerTestingTest):
     _type_name = "ParentageRelationships"
     _attrs = ()
     def run(self):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/137aedf82490/
changeset:   137aedf82490
branch:      yt
user:        MatthewTurk
date:        2012-10-18 04:47:58
summary:     Adding a "standard_patch_amr" test suite function
affected #:  3 files

diff -r 8e2e8fe6ac49dea863dc4f40dbd1f49a066a9af3 -r 137aedf8249008f988b572d517493b89fb0d3951 yt/frontends/enzo/tests/test_moving7.py
--- a/yt/frontends/enzo/tests/test_moving7.py
+++ b/yt/frontends/enzo/tests/test_moving7.py
@@ -25,9 +25,8 @@
 
 from yt.testing import *
 from yt.utilities.answer_testing.framework import \
-    can_run_pf, ProjectionValuesTest, FieldValuesTest, \
-    GridHierarchyTest, ParentageRelationshipsTest, \
-    GridValuesTest
+    requires_pf, \
+    standard_patch_amr
 from yt.frontends.enzo.api import EnzoStaticOutput
 
 _fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
@@ -35,18 +34,7 @@
 
 pf_fn = "DD0010/moving7_0010"
 
+@requires_pf(pf_fn)
 def test_moving7():
-    if not can_run_pf(pf_fn): return
-    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
-    yield GridHierarchyTest(pf_fn)
-    yield ParentageRelationshipsTest(pf_fn)
-    for field in _fields:
-        yield GridValuesTest(pf_fn, field)
-        for axis in [0, 1, 2]:
-            for ds in dso:
-                for weight_field in [None, "Density"]:
-                    yield ProjectionValuesTest(
-                        pf_fn, axis, field, weight_field,
-                        ds)
-                yield FieldValuesTest(
-                        pf_fn, field, ds)
+    for test in standard_patch_amr(pf_fn, _fields):
+        yield test


diff -r 8e2e8fe6ac49dea863dc4f40dbd1f49a066a9af3 -r 137aedf8249008f988b572d517493b89fb0d3951 yt/utilities/answer_testing/default_tests.py
--- a/yt/utilities/answer_testing/default_tests.py
+++ b/yt/utilities/answer_testing/default_tests.py
@@ -67,3 +67,4 @@
         for field in sorted(self.result):
             for p1, p2 in zip(self.result[field], old_result[field]):
                 self.compare_data_arrays(p1, p2, self.tolerance)
+


diff -r 8e2e8fe6ac49dea863dc4f40dbd1f49a066a9af3 -r 137aedf8249008f988b572d517493b89fb0d3951 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -304,3 +304,27 @@
             assert(newp == oldp)
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newc == oldc)
+
+def requires_pf(pf_fn):
+    def ffalse(func):
+        return lambda: None
+    if not can_run_pf(pf_fn):
+        return ffalse
+    def ftrue(func):
+        return func
+    return ftrue
+
+def standard_patch_amr(pf_fn, fields):
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/6fb61726fbb9/
changeset:   6fb61726fbb9
branch:      yt
user:        MatthewTurk
date:        2012-10-18 05:01:26
summary:     Cleaning up a bit with data_dir_load and removing a few print statements.
Updating nosetests setup.cfg entry to turn on answer testing by default.
affected #:  5 files

diff -r 137aedf8249008f988b572d517493b89fb0d3951 -r 6fb61726fbb9ac5448e9df6e40cf06c530869ae9 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -7,3 +7,5 @@
 where=yt
 exclude=answer_testing
 with-xunit=1
+with-answer-testing=1
+answer-compare=gold001


diff -r 137aedf8249008f988b572d517493b89fb0d3951 -r 6fb61726fbb9ac5448e9df6e40cf06c530869ae9 yt/frontends/enzo/tests/test_moving7.py
--- a/yt/frontends/enzo/tests/test_moving7.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""
-Enzo frontend tests using moving7
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from yt.testing import *
-from yt.utilities.answer_testing.framework import \
-    requires_pf, \
-    standard_patch_amr
-from yt.frontends.enzo.api import EnzoStaticOutput
-
-_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
-           "particle_density")
-
-pf_fn = "DD0010/moving7_0010"
-
-@requires_pf(pf_fn)
-def test_moving7():
-    for test in standard_patch_amr(pf_fn, _fields):
-        yield test


diff -r 137aedf8249008f988b572d517493b89fb0d3951 -r 6fb61726fbb9ac5448e9df6e40cf06c530869ae9 yt/frontends/enzo/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -0,0 +1,50 @@
+"""
+Enzo frontend tests using moving7 and galaxy0030
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    standard_patch_amr, \
+    data_dir_load
+from yt.frontends.enzo.api import EnzoStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
+           "particle_density")
+
+m7 = "DD0010/moving7_0010"
+@requires_pf(m7)
+def test_moving7():
+    pf = data_dir_load(m7)
+    yield assert_equal, str(pf), "moving7_0010"
+    for test in standard_patch_amr(m7, _fields):
+        yield test
+
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+@requires_pf(g30)
+def test_galaxy0030():
+    pf = data_dir_load(g30)
+    yield assert_equal, str(pf), "galaxy0030"
+    for test in standard_patch_amr(g30, _fields):
+        yield test

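Both test functions rely on nose's generator-test protocol: each yielded tuple
is unpacked as (callable, args...) and run as its own test case, and each
yielded test object is simply called. A small sketch of the tuple form
(assert_equal here is numpy.testing's; the tests above pick it up via
yt.testing's star import):

    from numpy.testing import assert_equal

    def test_arithmetic():
        # nose invokes assert_equal(2 + 2, 4) as a standalone test
        yield assert_equal, 2 + 2, 4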

diff -r 137aedf8249008f988b572d517493b89fb0d3951 -r 6fb61726fbb9ac5448e9df6e40cf06c530869ae9 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -131,27 +131,30 @@
             return False
     return AnswerTestingTest.result_storage is not None
 
+def data_dir_load(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        pf = load(pf_fn)
+        pf.h
+        return pf
+
 class AnswerTestingTest(object):
     reference_storage = None
 
     description = None
     def __init__(self, pf_fn):
-        path = ytcfg.get("yt", "test_data_dir")
-        with temp_cwd(path):
-            self.pf = load(pf_fn)
-            self.pf.h
+        self.pf = data_dir_load(pf_fn)
 
     def __call__(self):
         nv = self.run()
-        self.result_storage[str(self.pf)][self.name] = nv
         if self.reference_storage is not None:
             dd = self.reference_storage.get(str(self.pf))
             if dd is None: raise YTNoOldAnswer(str(self.pf))
             ov = dd[self.name]
-            return self.compare(nv, ov)
+            self.compare(nv, ov)
         else:
             ov = None
-            return True
+        self.result_storage[str(self.pf)][self.name] = nv
 
     def compare(self, new_result, old_result):
         raise RuntimeError
@@ -308,13 +311,15 @@
 def requires_pf(pf_fn):
     def ffalse(func):
         return lambda: None
+    def ftrue(func):
+        return func
     if not can_run_pf(pf_fn):
         return ffalse
-    def ftrue(func):
-        return func
-    return ftrue
+    else:
+        return ftrue
 
 def standard_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
     yield GridHierarchyTest(pf_fn)
     yield ParentageRelationshipsTest(pf_fn)

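"data_dir_load" factors the load-under-test_data_dir logic out of
AnswerTestingTest.__init__ so that module-level test code can open the same
dataset directly, as test_outputs.py now does. A usage sketch, assuming
test_data_dir has been pointed at a real data directory:

    from yt.utilities.answer_testing.framework import data_dir_load

    pf = data_dir_load("DD0010/moving7_0010")
    # the hierarchy is instantiated (pf.h) before the cwd is restored,
    # so later field access no longer depends on the working directory
    print str(pf)   # -> moving7_0010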

diff -r 137aedf8249008f988b572d517493b89fb0d3951 -r 6fb61726fbb9ac5448e9df6e40cf06c530869ae9 yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -29,14 +29,12 @@
 # We first create our dictionary of tests to run.  This starts out empty, and
 # as tests are imported it will be filled.
 if "TestRegistry" not in locals():
-    print "Initializing TestRegistry"
     class TestRegistry(dict):
         def __new__(cls, *p, **k):
             if not '_the_instance' in cls.__dict__:
                 cls._the_instance = dict.__new__(cls)
             return cls._the_instance
 if "test_registry" not in locals():
-    print "Initializing test_registry"
     test_registry = TestRegistry()
 
 # The exceptions we raise, related to the character of the failure.



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3e20e73fd4c8/
changeset:   3e20e73fd4c8
branch:      yt-3.0
user:        MatthewTurk
date:        2012-10-18 06:24:15
summary:     Merging from answer testing branch
affected #:  9 files

diff -r 5b349e90c2f1d46682ddd73ec2aba800fe96f836 -r 3e20e73fd4c8d03a4993841dc6f835648050cd5b setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -7,3 +7,5 @@
 where=yt
 exclude=answer_testing
 with-xunit=1
+with-answer-testing=1
+answer-compare=gold001


diff -r 5b349e90c2f1d46682ddd73ec2aba800fe96f836 -r 3e20e73fd4c8d03a4993841dc6f835648050cd5b setup.py
--- a/setup.py
+++ b/setup.py
@@ -154,7 +154,11 @@
             'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
                             'yt = yt.utilities.command_line:run_main',
-                       ]},
+                      ],
+                      'nose.plugins.0.10': [
+                            'answer-testing = yt.utilities.answer_testing.api:AnswerTesting'
+                      ]
+        },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
         url="http://yt-project.org/",


diff -r 5b349e90c2f1d46682ddd73ec2aba800fe96f836 -r 3e20e73fd4c8d03a4993841dc6f835648050cd5b yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -54,6 +54,7 @@
     pasteboard_repo = '',
     reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
+    test_data_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',

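The new "test_data_dir" option defaults to a sentinel path, so answer tests are
silently skipped until it points at real data. It can be set with the same
tuple-key syntax used elsewhere in this series; a one-line sketch with an
illustrative path:

    from yt.config import ytcfg
    ytcfg["yt", "test_data_dir"] = "/data/yt_test_data"   # illustrative path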

diff -r 5b349e90c2f1d46682ddd73ec2aba800fe96f836 -r 3e20e73fd4c8d03a4993841dc6f835648050cd5b yt/frontends/enzo/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -0,0 +1,50 @@
+"""
+Enzo frontend tests using moving7 and galaxy0030
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    standard_patch_amr, \
+    data_dir_load
+from yt.frontends.enzo.api import EnzoStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
+           "particle_density")
+
+m7 = "DD0010/moving7_0010"
+@requires_pf(m7)
+def test_moving7():
+    pf = data_dir_load(m7)
+    yield assert_equal, str(pf), "moving7_0010"
+    for test in standard_patch_amr(m7, _fields):
+        yield test
+
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+@requires_pf(g30)
+def test_galaxy0030():
+    pf = data_dir_load(g30)
+    yield assert_equal, str(pf), "galaxy0030"
+    for test in standard_patch_amr(g30, _fields):
+        yield test


diff -r 5b349e90c2f1d46682ddd73ec2aba800fe96f836 -r 3e20e73fd4c8d03a4993841dc6f835648050cd5b yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -57,3 +57,8 @@
     TestBooleanANDParticleQuantity, \
     TestBooleanORParticleQuantity, \
     TestBooleanNOTParticleQuantity
+
+from .framework import AnswerTesting


diff -r 5b349e90c2f1d46682ddd73ec2aba800fe96f836 -r 3e20e73fd4c8d03a4993841dc6f835648050cd5b yt/utilities/answer_testing/default_tests.py
--- a/yt/utilities/answer_testing/default_tests.py
+++ b/yt/utilities/answer_testing/default_tests.py
@@ -67,3 +67,4 @@
         for field in sorted(self.result):
             for p1, p2 in zip(self.result[field], old_result[field]):
                 self.compare_data_arrays(p1, p2, self.tolerance)
+


diff -r 5b349e90c2f1d46682ddd73ec2aba800fe96f836 -r 3e20e73fd4c8d03a4993841dc6f835648050cd5b yt/utilities/answer_testing/framework.py
--- /dev/null
+++ b/yt/utilities/answer_testing/framework.py
@@ -0,0 +1,335 @@
+"""
+Answer Testing using Nose as a starting point
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import logging
+import os
+import hashlib
+import contextlib
+import urllib2
+
+from yt.testing import *
+from yt.utilities.command_line import get_yt_version
+from yt.config import ytcfg
+from yt.utilities.logger import \
+    disable_stream_logging
+from nose.plugins import Plugin
+from yt.mods import *
+import cPickle
+
+mylog = logging.getLogger('nose.plugins.answer-testing')
+
+_latest = "SomeValue"
+_url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
+
+class AnswerTestOpener(object):
+    def __init__(self, reference_name):
+        self.reference_name = reference_name
+        self.cache = {}
+
+    def get(self, pf_name, default = None):
+        if pf_name in self.cache: return self.cache[pf_name]
+        url = _url_path % (self.reference_name, pf_name)
+        try:
+            resp = urllib2.urlopen(url)
+            # This is dangerous, but we have a controlled S3 environment
+            data = resp.read()
+            rv = cPickle.loads(data)
+        except urllib2.HTTPError as ex:
+            mylog.warning("Missing %s (%s)", url, ex)
+            raise YTNoOldAnswer(url)
+        self.cache[pf_name] = rv
+        return rv
+
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=None, help="The name against which we will compare")
+        parser.add_option("--answer-name", dest="this_name",
+            default=my_hash,
+            help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+        disable_stream_logging()
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
+        if options.compare_name is not None:
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
+
+    def finalize(self, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+@contextlib.contextmanager
+def temp_cwd(cwd):
+    oldcwd = os.getcwd()
+    os.chdir(cwd)
+    try:
+        yield
+    finally:
+        # always restore the old working directory, even if the body raises
+        os.chdir(oldcwd)
+
+def can_run_pf(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        try:
+            load(pf_fn)
+        except:
+            return False
+    return AnswerTestingTest.result_storage is not None
+
+def data_dir_load(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        pf = load(pf_fn)
+        pf.h
+        return pf
+
+class AnswerTestingTest(object):
+    reference_storage = None
+
+    description = None
+    def __init__(self, pf_fn):
+        self.pf = data_dir_load(pf_fn)
+
+    def __call__(self):
+        nv = self.run()
+        if self.reference_storage is not None:
+            dd = self.reference_storage.get(str(self.pf))
+            if dd is None: raise YTNoOldAnswer(str(self.pf))
+            ov = dd[self.name]
+            self.compare(nv, ov)
+        else:
+            ov = None
+        self.result_storage[str(self.pf)][self.name] = nv
+
+    def compare(self, new_result, old_result):
+        raise RuntimeError
+
+    def create_obj(self, pf, obj_type):
+        # obj_type should be tuple of
+        #  ( obj_name, ( args ) )
+        if obj_type is None:
+            return pf.h.all_data()
+        cls = getattr(pf.h, obj_type[0])
+        obj = cls(*obj_type[1])
+        return obj
+
+    @property
+    def sim_center(self):
+        """
+        This returns the center of the domain.
+        """
+        return 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+
+    @property
+    def max_dens_location(self):
+        """
+        This is a helper function to return the location of the most dense
+        point.
+        """
+        return self.pf.h.find_max("Density")[1]
+
+    @property
+    def entire_simulation(self):
+        """
+        Return an unsorted array of values that cover the entire domain.
+        """
+        return self.pf.h.all_data()
+
+    @property
+    def name(self):
+        obj_type = getattr(self, "obj_type", None)
+        if obj_type is None:
+            oname = "all"
+        else:
+            oname = "_".join((str(s) for s in obj_type))
+        args = [self._type_name, str(self.pf), oname]
+        args += [str(getattr(self, an)) for an in self._attrs]
+        return "_".join(args)
+        
+class FieldValuesTest(AnswerTestingTest):
+    _type_name = "FieldValues"
+    _attrs = ("field", )
+
+    def __init__(self, pf_fn, field, obj_type = None):
+        super(FieldValuesTest, self).__init__(pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        return obj[self.field]
+
+    def compare(self, new_result, old_result):
+        assert_equal(new_result, old_result)
+
+class ProjectionValuesTest(AnswerTestingTest):
+    _type_name = "ProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(ProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        return proj.field_data
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class GridValuesTest(AnswerTestingTest):
+    _type_name = "GridValues"
+    _attrs = ("field",)
+
+    def __init__(self, pf_fn, field):
+        super(GridValuesTest, self).__init__(pf_fn)
+        self.field = field
+
+    def run(self):
+        hashes = {}
+        for g in self.pf.h.grids:
+            hashes[g.id] = hashlib.md5(g[self.field].tostring()).hexdigest()
+            g.clear_data()
+        return hashes
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class GridHierarchyTest(AnswerTestingTest):
+    _type_name = "GridHierarchy"
+    _attrs = ()
+
+    def run(self):
+        result = {}
+        result["grid_dimensions"] = self.pf.h.grid_dimensions
+        result["grid_left_edges"] = self.pf.h.grid_left_edge
+        result["grid_right_edges"] = self.pf.h.grid_right_edge
+        result["grid_levels"] = self.pf.h.grid_levels
+        result["grid_particle_count"] = self.pf.h.grid_particle_count
+
+    def compare(self, new_result, old_result):
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class ParentageRelationshipsTest(AnswerTestingTest):
+    _type_name = "ParentageRelationships"
+    _attrs = ()
+    def run(self):
+        result = {}
+        result["parents"] = []
+        result["children"] = []
+        for g in self.pf.h.grids:
+            p = g.Parent
+            if p is None:
+                result["parents"].append(None)
+            elif hasattr(p, "id"):
+                result["parents"].append(p.id)
+            else:
+                result["parents"].append([pg.id for pg in p])
+            result["children"].append([c.id for c in g.Children])
+        return result
+
+    def compare(self, new_result, old_result):
+        for newp, oldp in zip(new_result["parents"], old_result["parents"]):
+            assert(newp == oldp)
+        for newc, oldc in zip(new_result["children"], old_result["children"]):
+            assert(newc == oldc)
+
+def requires_pf(pf_fn):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    if not can_run_pf(pf_fn):
+        return ffalse
+    else:
+        return ftrue
+
+def standard_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)

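Every concrete test class above follows the same contract: run() returns a
picklable result, compare() checks it against the stored answer, and _type_name
plus _attrs feed into the storage key built by the name property. A minimal
sketch of a new test type under that contract (the field-maximum check is
illustrative, not part of this commit):

    class MaxFieldValueTest(AnswerTestingTest):
        _type_name = "MaxFieldValue"
        _attrs = ("field",)

        def __init__(self, pf_fn, field):
            super(MaxFieldValueTest, self).__init__(pf_fn)
            self.field = field

        def run(self):
            # return something picklable; it is stored under self.name
            return float(self.pf.h.all_data()[self.field].max())

        def compare(self, new_result, old_result):
            assert_equal(new_result, old_result)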

diff -r 5b349e90c2f1d46682ddd73ec2aba800fe96f836 -r 3e20e73fd4c8d03a4993841dc6f835648050cd5b yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -29,14 +29,12 @@
 # We first create our dictionary of tests to run.  This starts out empty, and
 # as tests are imported it will be filled.
 if "TestRegistry" not in locals():
-    print "Initializing TestRegistry"
     class TestRegistry(dict):
         def __new__(cls, *p, **k):
             if not '_the_instance' in cls.__dict__:
                 cls._the_instance = dict.__new__(cls)
             return cls._the_instance
 if "test_registry" not in locals():
-    print "Initializing test_registry"
     test_registry = TestRegistry()
 
 # The exceptions we raise, related to the character of the failure.


diff -r 5b349e90c2f1d46682ddd73ec2aba800fe96f836 -r 3e20e73fd4c8d03a4993841dc6f835648050cd5b yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -148,3 +148,11 @@
     def __str__(self):
         return "You must create an API key before uploading.  See " + \
                "https://data.yt-project.org/getting_started.html"
+
+class YTNoOldAnswer(YTException):
+    def __init__(self, path):
+        self.path = path
+
+    def __str__(self):
+        return "There is no old answer available.\n" + \
+               str(self.path)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/33a1976b9dee/
changeset:   33a1976b9dee
branch:      yt-3.0
user:        MatthewTurk
date:        2012-10-20 14:24:47
summary:     Merging from yt-3.0 tip
affected #:  189 files

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
-include distribute_setup.py
+include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.hh *.h README* 
+recursive-include yt *.pyx *.pxd *.hh *.h README*


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 doc/activate.csh
--- a/doc/activate.csh
+++ b/doc/activate.csh
@@ -20,7 +20,7 @@
     setenv YT_DEST
 endif
 set _OLD_VIRTUAL_YT_DEST="$YT_DEST"
-setenv YT_DEST "${VIRTUAL_ENV}:${YT_DEST}"
+setenv YT_DEST "${VIRTUAL_ENV}"
 
 if ($?PYTHONPATH == 0) then
     setenv PYTHONPATH


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -18,7 +18,7 @@
    Python is more forgiving than C.
  * Avoid copying memory when possible. For example, don't do 
    "a = a.reshape(3,4)" when "a.shape = (3,4)" will do, and "a = a * 3" should
-   be "na.multiply(a, 3, a)".
+   be "np.multiply(a, 3, a)".
  * In general, avoid all double-underscore method names: __something is usually
    unnecessary.
  * When writing a subclass, use the super built-in to access the super class,
@@ -40,8 +40,7 @@
 
    from yt.visualization.plot_collection import PlotCollection
 
- * Numpy is to be imported as "na" not "np".  While this may change in the
-   future, for now this is the correct idiom.
+ * Numpy is to be imported as "np", after a long time of using "na".
  * Do not use too many keyword arguments.  If you have a lot of keyword
    arguments, then you are doing too much in __init__ and not enough via
    parameter setting.

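The two memory idioms the styleguide names are easy to check interactively:
assigning to a.shape changes the view in place (and raises if a copy would be
required), and the three-argument ufunc form writes into an existing output
array instead of allocating a new one. A short illustration:

    import numpy as np

    a = np.arange(12)
    a.shape = (3, 4)        # in-place shape change, per the guideline above
    np.multiply(a, 3, a)    # 3*a written back into a's own buffer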

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -220,11 +220,24 @@
         echo "  * libncurses5-dev"
         echo "  * zip"
         echo "  * uuid-dev"
+        echo "  * libfreetype6-dev"
+        echo "  * tk-dev"
         echo
         echo "You can accomplish this by executing:"
         echo
-        echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev"
+        echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev libfreetype6-dev tk-dev"
         echo
+        echo
+        echo " Additionally, if you want to put yt's lib dir in your LD_LIBRARY_PATH"
+        echo " so you can use yt without the activate script, you might "
+        echo " want to consider turning off LIBZ and FREETYPE in this"
+        echo " install script by editing this file and setting"
+        echo
+        echo " INST_ZLIB=0"
+        echo " INST_FTYPE=0"
+        echo 
+        echo " to avoid conflicts with other command-line programs "
+        echo " (like eog and evince, for example)."
     fi
     if [ ! -z "${CFLAGS}" ]
     then
@@ -399,9 +412,8 @@
 # Now we dump all our SHA512 files out.
 
 echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
-echo 'b8a12bf05b3aafa71135e47da81440fd0f16a4bd91954bc5615ad3d3b7f9df7d5a7d5620dc61088dc6b04952c5c66ebda947a4cfa33ed1be614c8ca8c0f11dff  PhiloGL-1.4.2.zip' > PhiloGL-1.4.2.zip.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e  Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
+echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
 echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
@@ -430,7 +442,7 @@
 [ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
 [ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
 [ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
-get_ytproject Python-2.7.2.tgz
+get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
 get_ytproject matplotlib-1.1.0.tar.gz
 get_ytproject mercurial-2.2.2.tar.gz
@@ -555,11 +567,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.2/done ]
+if [ ! -e Python-2.7.3/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  YT loves you."
-    [ ! -e Python-2.7.2 ] && tar xfz Python-2.7.2.tgz
-    cd Python-2.7.2
+    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
+    cd Python-2.7.3
     ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -684,6 +696,11 @@
 cd $YT_DIR
 ( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
 
+echo "Building Fortran kD-tree module."
+cd yt/utilities/kdtree
+( make 2>&1 ) 1>> ${LOG_FILE}
+cd ../../..
+
 echo "Installing yt"
 echo $HDF5_DIR > hdf5.cfg
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 nose.cfg
--- /dev/null
+++ b/nose.cfg
@@ -0,0 +1,4 @@
+[nosetests]
+detailed-errors=1
+where=yt
+exclude=answer_testing


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,3 +1,11 @@
 [egg_info]
 #tag_build = .dev
 #tag_svn_revision = 1
+
+[nosetests]
+detailed-errors=1
+where=yt
+exclude=answer_testing
+with-xunit=1
+with-answer-testing=1
+answer-compare=gold001


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 setup.py
--- a/setup.py
+++ b/setup.py
@@ -154,7 +154,11 @@
             'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
                             'yt = yt.utilities.command_line:run_main',
-                       ]},
+                      ],
+                      'nose.plugins.0.10': [
+                            'answer-testing = yt.utilities.answer_testing.api:AnswerTesting'
+                      ]
+        },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
         url="http://yt-project.org/",


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -81,3 +81,5 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
+
+__version__ = "2.5-dev"


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def voigt(a,u):
     """
@@ -65,15 +65,15 @@
             J. Murthy, Mar 1990 (adapted from the FORTRAN program of Armstrong)
                       Sep 1990 (better overflow checking)
     """
-    x = na.asarray(u).astype(na.float64)
-    y = na.asarray(a).astype(na.float64)
+    x = np.asarray(u).astype(np.float64)
+    y = np.asarray(a).astype(np.float64)
 
-    w = na.array([0.462243670,   0.286675505,   0.109017206, 
+    w = np.array([0.462243670,   0.286675505,   0.109017206, 
                   0.0248105209,  0.00324377334, 0.000228338636, 
                   7.80255648e-6, 1.08606937e-7, 4.39934099e-10, 
                   2.22939365e-13])
 
-    t = na.array([0.245340708, 0.737473729, 1.23407622, 1.73853771, 
+    t = np.array([0.245340708, 0.737473729, 1.23407622, 1.73853771, 
                   2.25497400,  2.78880606,  3.34785457, 3.94476404, 
                   4.60368245,  5.38748089])
 
@@ -94,31 +94,31 @@
     y2 = y * y
 
     # limits are y<1.,  x<4 or y<1.8(x+1),  x>4 (no checking performed)
-    u1 = na.exp(-x * x + y2) * na.cos(2. * x * y)
+    u1 = np.exp(-x * x + y2) * np.cos(2. * x * y)
 
     # Clenshaw's Algorithm
-    bno1 = na.zeros(x.shape)
-    bno2 = na.zeros(x.shape)
-    x1 = na.clip((x / 5.), -na.inf, 1.)
+    bno1 = np.zeros(x.shape)
+    bno2 = np.zeros(x.shape)
+    x1 = np.clip((x / 5.), -np.inf, 1.)
     coef = 4. * x1 * x1 - 2.
     for i in range(33, -1, -1):
         bn = coef * bno1 - bno2 + c[i]
-        bno2 = na.copy(bno1)
-        bno1 = na.copy(bn)
+        bno2 = np.copy(bno1)
+        bno1 = np.copy(bn)
 
     f = x1 * (bn - bno2)
     dno1 = 1. - 2. * x * f
     dno2 = f
 
-    q = na.abs(x) > 5
+    q = np.abs(x) > 5
     if q.any():
-        x14 = na.power(na.clip(x[q], -na.inf, 500.),  14)
-        x12 = na.power(na.clip(x[q], -na.inf, 1000.), 12)
-        x10 = na.power(na.clip(x[q], -na.inf, 5000.), 10)
-        x8  = na.power(na.clip(x[q], -na.inf, 50000.), 8)
-        x6  = na.power(na.clip(x[q], -na.inf, 1.e6),   6)
-        x4  = na.power(na.clip(x[q], -na.inf, 1.e9),   4)
-        x2  = na.power(na.clip(x[q], -na.inf, 1.e18),  2)
+        x14 = np.power(np.clip(x[q], -np.inf, 500.),  14)
+        x12 = np.power(np.clip(x[q], -np.inf, 1000.), 12)
+        x10 = np.power(np.clip(x[q], -np.inf, 5000.), 10)
+        x8  = np.power(np.clip(x[q], -np.inf, 50000.), 8)
+        x6  = np.power(np.clip(x[q], -np.inf, 1.e6),   6)
+        x4  = np.power(np.clip(x[q], -np.inf, 1.e9),   4)
+        x2  = np.power(np.clip(x[q], -np.inf, 1.e18),  2)
         dno1[q] = -(0.5 / x2 + 0.75 / x4 + 1.875 / x6 + 
                     6.5625 / x8 + 29.53125 / x10 +
                     162.4218 / x12 + 1055.7421 / x14)
@@ -135,12 +135,12 @@
             if (i % 2) == 1:
                 q = -q
                 yn = yn * y2
-                g = dn.astype(na.float64) * yn
+                g = dn.astype(np.float64) * yn
                 funct = funct + q * g
-                if na.max(na.abs(g / funct)) <= 1.e-8: break
+                if np.max(np.abs(g / funct)) <= 1.e-8: break
 
     k1 = u1 - 1.12837917 * funct
-    k1 = k1.astype(na.float64).clip(0)
+    k1 = k1.astype(np.float64).clip(0)
     return k1
 
 def tau_profile(lam0, fval, gamma, vkms, column_density, 
@@ -191,19 +191,19 @@
     ## create wavelength
     if lambda_bins is None:
         lambda_bins = lam1 + \
-            na.arange(n_lambda, dtype=na.float) * dlambda - \
+            np.arange(n_lambda, dtype=np.float) * dlambda - \
             n_lambda * dlambda / 2    # wavelength vector (angstroms)
     nua = ccgs / (lambda_bins / 1.e8) # frequency vector (Hz)
 
     ## tau_0
-    tau_X = na.sqrt(na.pi) * e**2 / (me * ccgs) * \
+    tau_X = np.sqrt(np.pi) * e**2 / (me * ccgs) * \
         column_density * fval / vdop
     tau1 = tau_X * lam1cgs
     tau0 = tau_X * lam0cgs
 
     # dimensionless frequency offset in units of doppler freq
     x = (nua - nu1) / nudop
-    a = gamma / (4 * na.pi * nudop)   # damping parameter 
+    a = gamma / (4 * np.pi * nudop)   # damping parameter 
     phi = voigt(a, x)                 # profile
     tauphi = tau0 * phi               # profile scaled with tau0
 

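voigt(a, u) evaluates the Voigt line profile on an array of dimensionless
frequency offsets, and tau_profile scales the result by tau0 to build the
optical depth. A usage sketch with illustrative parameter values:

    import numpy as np
    from yt.analysis_modules.absorption_spectrum.absorption_line import voigt

    u = np.linspace(-10.0, 10.0, 201)   # offsets in units of the Doppler width
    phi = voigt(0.01, u)                # damping parameter a = 0.01
    # phi peaks at u = 0 and falls off into the damping wings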

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from absorption_line import tau_profile
 
@@ -48,7 +48,7 @@
         self.tau_field = None
         self.flux_field = None
         self.spectrum_line_list = None
-        self.lambda_bins = na.linspace(lambda_min, lambda_max, n_lambda)
+        self.lambda_bins = np.linspace(lambda_min, lambda_max, n_lambda)
         self.bin_width = (lambda_max - lambda_min) / float(n_lambda - 1)
         self.line_list = []
         self.continuum_list = []
@@ -114,13 +114,13 @@
             field_data[field] = input[field].value
         input.close()
 
-        self.tau_field = na.zeros(self.lambda_bins.size)
+        self.tau_field = np.zeros(self.lambda_bins.size)
         self.spectrum_line_list = []
 
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity)
         self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
 
-        self.flux_field = na.exp(-self.tau_field)
+        self.flux_field = np.exp(-self.tau_field)
 
         if output_file.endswith('.h5'):
             self._write_spectrum_hdf5(output_file)
@@ -148,20 +148,20 @@
                 delta_lambda += continuum['wavelength'] * (1 + field_data['redshift']) * \
                     field_data['los_velocity'] / speed_of_light_cgs
             this_wavelength = delta_lambda + continuum['wavelength']
-            right_index = na.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
-            left_index = na.digitize((this_wavelength *
-                                     na.power((tau_min * continuum['normalization'] /
+            right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
+            left_index = np.digitize((this_wavelength *
+                                     np.power((tau_min * continuum['normalization'] /
                                                column_density), (1. / continuum['index']))),
                                     self.lambda_bins).clip(0, self.n_lambda)
 
-            valid_continuua = na.where(((column_density /
+            valid_continuua = np.where(((column_density /
                                          continuum['normalization']) > tau_min) &
                                        (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding continuum feature - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
                             valid_continuua.size)
             for i, lixel in enumerate(valid_continuua):
-                line_tau = na.power((self.lambda_bins[left_index[lixel]:right_index[lixel]] /
+                line_tau = np.power((self.lambda_bins[left_index[lixel]:right_index[lixel]] /
                                      this_wavelength[lixel]), continuum['index']) * \
                                      column_density[lixel] / continuum['normalization']
                 self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
@@ -184,10 +184,10 @@
                 # include factor of (1 + z) because our velocity is in proper frame.
                 delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
                     field_data['los_velocity'] / speed_of_light_cgs
-            thermal_b = km_per_cm * na.sqrt((2 * boltzmann_constant_cgs *
+            thermal_b = km_per_cm * np.sqrt((2 * boltzmann_constant_cgs *
                                              field_data['Temperature']) /
                                             (amu_cgs * line['atomic_mass']))
-            center_bins = na.digitize((delta_lambda + line['wavelength']),
+            center_bins = np.digitize((delta_lambda + line['wavelength']),
                                       self.lambda_bins)
 
             # ratio of line width to bin width
@@ -201,7 +201,7 @@
                            spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
 
             # loop over all lines wider than the bin width
-            valid_lines = na.where((width_ratio >= 1.0) &
+            valid_lines = np.where((width_ratio >= 1.0) &
                                    (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
                             valid_lines.size)

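Both _add_lines_to_spectrum and _add_continua_to_spectrum lean on np.digitize
to map wavelengths onto spectrum bins: for each value it returns the index of
the bin the value falls into. A small illustration:

    import numpy as np

    bins = np.linspace(1000.0, 2000.0, 11)            # bin edges in angstroms
    wavelengths = np.array([999.0, 1050.0, 1550.0])
    print np.digitize(wavelengths, bins)              # prints [0 1 6]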

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/coordinate_transformation/transforms.py
--- a/yt/analysis_modules/coordinate_transformation/transforms.py
+++ b/yt/analysis_modules/coordinate_transformation/transforms.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 
 from yt.utilities.linear_interpolators import \
@@ -44,13 +44,13 @@
     mylog.warning("See yt/extensions/coordinate_transforms.py for plotting information")
     if center is None: center = pf.h.find_max("Density")[1]
     fields = ensure_list(fields)
-    r,theta,phi = na.mgrid[0:rmax:nr*1j,
-                           0:na.pi:ntheta*1j,
-                           0:2*na.pi:nphi*1j]
+    r,theta,phi = np.mgrid[0:rmax:nr*1j,
+                           0:np.pi:ntheta*1j,
+                           0:2*np.pi:nphi*1j]
     new_grid = dict(r=r, theta=theta, phi=phi)
-    new_grid['x'] = r*na.sin(theta)*na.cos(phi) + center[0]
-    new_grid['y'] = r*na.sin(theta)*na.sin(phi) + center[1]
-    new_grid['z'] = r*na.cos(theta)             + center[2]
+    new_grid['x'] = r*np.sin(theta)*np.cos(phi) + center[0]
+    new_grid['y'] = r*np.sin(theta)*np.sin(phi) + center[1]
+    new_grid['z'] = r*np.cos(theta)             + center[2]
     sphere = pf.h.sphere(center, rmax)
     return arbitrary_regrid(new_grid, sphere, fields, smoothed)
 
@@ -62,10 +62,10 @@
     This has not been well-tested other than for regular spherical regridding.
     """
     fields = ensure_list(fields)
-    new_grid['handled'] = na.zeros(new_grid['x'].shape, dtype='bool')
+    new_grid['handled'] = np.zeros(new_grid['x'].shape, dtype='bool')
     for field in fields:
-        new_grid[field] = na.zeros(new_grid['x'].shape, dtype='float64')
-    grid_order = na.argsort(data_source.gridLevels)
+        new_grid[field] = np.zeros(new_grid['x'].shape, dtype='float64')
+    grid_order = np.argsort(data_source.gridLevels)
     ng = len(data_source._grids)
 
     for i,grid in enumerate(data_source._grids[grid_order][::-1]):
@@ -73,12 +73,12 @@
         cg = grid.retrieve_ghost_zones(1, fields, smoothed=smoothed)
 
         # makes x0,x1,y0,y1,z0,z1
-        bounds = na.concatenate(zip(cg.left_edge, cg.right_edge)) 
+        bounds = np.concatenate(zip(cg.left_edge, cg.right_edge)) 
 
         
         # Now we figure out which of our points are inside this grid
         # Note that we're only looking at the grid, not the grid-with-ghost-zones
-        point_ind = na.ones(new_grid['handled'].shape, dtype='bool') # everything at first
+        point_ind = np.ones(new_grid['handled'].shape, dtype='bool') # everything at first
         for i,ax in enumerate('xyz'): # i = 0,1,2 ; ax = x, y, z
             # &= does a logical_and on the array
             point_ind &= ( ( grid.LeftEdge[i] <= new_grid[ax]      )
@@ -116,7 +116,7 @@
     pylab.clf()
     ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
     ax.pcolormesh(phi[:,i,:], r[:,i,:],
-                  na.log10(sph_grid[field][:,i,:]))
+                  np.log10(sph_grid[field][:,i,:]))
     pylab.savefig("polar/latitude_%03i.png" % i)
 
 for i in range(n_phi):
@@ -124,6 +124,6 @@
     pylab.clf()
     ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
     ax.pcolormesh(theta[:,:,i], r[:,:,i],
-                  na.log10(sph_grid[field][:,:,i]))
+                  np.log10(sph_grid[field][:,:,i]))
     pylab.savefig("polar/longitude_%03i.png" % i)
 """

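The spherical regridding above uses numpy's complex-step slice convention: an
imaginary step in np.mgrid means "this many samples, endpoints inclusive"
rather than a spacing. A one-line illustration:

    import numpy as np
    print np.mgrid[0.0:1.0:5j]   # five evenly spaced samples from 0 to 1, inclusive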

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.convenience import \
     simulation
@@ -132,12 +132,12 @@
 
             # fill redshift space with datasets
             while ((z > near_redshift) and
-                   (na.fabs(z - near_redshift) > z_Tolerance)):
+                   (np.fabs(z - near_redshift) > z_Tolerance)):
 
                 # For first data dump, choose closest to desired redshift.
                 if (len(cosmology_splice) == 0):
                     # Sort data outputs by proximity to current redsfhit.
-                    self.splice_outputs.sort(key=lambda obj:na.fabs(z - \
+                    self.splice_outputs.sort(key=lambda obj:np.fabs(z - \
                         obj['redshift']))
                     cosmology_splice.append(self.splice_outputs[0])
 
@@ -146,7 +146,7 @@
                     current_slice = cosmology_splice[-1]
                     while current_slice['next'] is not None and \
                             (z < current_slice['next']['redshift'] or \
-                                 na.abs(z - current_slice['next']['redshift']) <
+                                 np.abs(z - current_slice['next']['redshift']) <
                                  z_Tolerance):
                         current_slice = current_slice['next']
 
@@ -164,7 +164,7 @@
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
             # Sort data outputs by proximity to current redshift.
-            self.splice_outputs.sort(key=lambda obj:na.fabs(far_redshift -
+            self.splice_outputs.sort(key=lambda obj:np.fabs(far_redshift -
                                                                     obj['redshift']))
             # For first data dump, choose closest to desired redshift.
             cosmology_splice.append(self.splice_outputs[0])
@@ -246,9 +246,9 @@
         outputs = []
 
         while z > near_redshift:
-            rounded = na.round(z, decimals=decimals)
+            rounded = np.round(z, decimals=decimals)
             if rounded - z < 0:
-                rounded += na.power(10.0, (-1.0*decimals))
+                rounded += np.power(10.0, (-1.0*decimals))
             z = rounded
 
             deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
@@ -289,7 +289,7 @@
             distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
               self.simulation.hubble_constant
 
-            while ((na.fabs(distance2-target_distance)/distance2) > d_Tolerance):
+            while ((np.fabs(distance2-target_distance)/distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
@@ -299,9 +299,9 @@
                 iteration += 1
                 if (iteration > max_Iterations):
                     mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, na.fabs(z2 - z)))
+                                (z, np.fabs(z2 - z)))
                     break
-            output['deltazMax'] = na.fabs(z2 - z)
+            output['deltazMax'] = np.fabs(z2 - z)
 
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
@@ -329,7 +329,7 @@
             distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
               self.simulation.hubble_constant
 
-            while ((na.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
+            while ((np.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
@@ -339,10 +339,10 @@
                 iteration += 1
                 if (iteration > max_Iterations):
                     mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, na.fabs(z2 - z)))
+                                (z, np.fabs(z2 - z)))
                     break
             # Use this calculation or the absolute minimum specified by the user.
-            output['deltazMin'] = max(na.fabs(z2 - z), deltaz_min)
+            output['deltazMin'] = max(np.fabs(z2 - z), deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance
@@ -364,7 +364,7 @@
         distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
           self.cosmology.HubbleConstantNow / 100.0
 
-        while ((na.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
+        while ((np.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
             m = (distance2 - distance1) / (z2 - z1)
             z1 = z2
             distance1 = distance2
@@ -374,6 +374,6 @@
             iteration += 1
             if (iteration > max_Iterations):
                 mylog.error("deltaz_forward: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                            (z, na.fabs(z2 - z)))
+                            (z, np.fabs(z2 - z)))
                 break
-        return na.fabs(z2 - z)
+        return np.fabs(z2 - z)

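_deltaz_forward and the two _calculate_deltaz_* helpers converge with the same
secant-style update: fit a line through the last two (redshift, comoving
distance) samples and step to where that line meets the target distance. A
standalone sketch of one such update (the function name is illustrative):

    def secant_update(z1, d1, z2, d2, target_distance):
        # slope of comoving distance with respect to redshift,
        # estimated from the last two samples
        m = (d2 - d1) / (z2 - z1)
        # step z2 to where the linear model reaches the target
        return z2 + (target_distance - d2) / m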

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
@@ -24,25 +24,25 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def common_volume(n_cube_1, n_cube_2, periodic=None):
     "Return the n-volume in common between the two n-cubes."
 
     # Check for proper args.
-    if ((len(na.shape(n_cube_1)) != 2) or
-        (na.shape(n_cube_1)[1] != 2) or
-        (na.shape(n_cube_1) != na.shape(n_cube_2))):
+    if ((len(np.shape(n_cube_1)) != 2) or
+        (np.shape(n_cube_1)[1] != 2) or
+        (np.shape(n_cube_1) != np.shape(n_cube_2))):
         print "Arguments must be 2 (n, 2) numpy array."
         return 0
 
     if ((periodic is not None) and
-        (na.shape(n_cube_1) != na.shape(periodic))):
+        (np.shape(n_cube_1) != np.shape(periodic))):
         print "periodic argument must be (n, 2) numpy array."
         return 0
 
     nCommon = 1.0
-    for q in range(na.shape(n_cube_1)[0]):
+    for q in range(np.shape(n_cube_1)[0]):
         if (periodic is None):
             nCommon *= common_segment(n_cube_1[q], n_cube_2[q])
         else:
@@ -97,10 +97,10 @@
             return min(flen1, flen2)
 
         # Adjust for periodicity
-        seg1[0] = na.mod(seg1[0], scale) + periodic[0]
+        seg1[0] = np.mod(seg1[0], scale) + periodic[0]
         seg1[1] = seg1[0] + len1
         if (seg1[1] > periodic[1]): seg1[1] -= scale
-        seg2[0] = na.mod(seg2[0], scale) + periodic[0]
+        seg2[0] = np.mod(seg2[0], scale) + periodic[0]
         seg2[1] = seg2[0] + len2
         if (seg2[1] > periodic[1]): seg2[1] -= scale
 

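common_volume expects each n-cube as an (n, 2) array of (left, right) limits
per axis and multiplies together the per-axis overlaps computed by
common_segment. A usage sketch with two unit squares offset by half a box:

    import numpy as np
    from yt.analysis_modules.cosmological_observation.light_cone.common_n_volume \
        import common_volume

    cube1 = np.array([[0.0, 1.0], [0.0, 1.0]])
    cube2 = np.array([[0.5, 1.5], [0.5, 1.5]])
    print common_volume(cube1, cube2)   # 0.5 * 0.5 = 0.25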

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.analysis_modules.halo_profiler.api import \
@@ -77,7 +77,7 @@
 
     # Write out cube of masks from each slice.
     if cube_file is not None:
-        _write_halo_mask(cube_file, na.array(light_cone_mask))
+        _write_halo_mask(cube_file, np.array(light_cone_mask))
 
     # Write out a text list of all halos in the image.
     if map_file is not None:
@@ -86,7 +86,7 @@
     # Write out final mask.
     if mask_file is not None:
         # Final mask is simply the product of the mask from each slice.
-        final_mask = na.ones(shape=(pixels, pixels))
+        final_mask = np.ones(shape=(pixels, pixels))
         for mask in light_cone_mask:
             final_mask *= mask
         _write_halo_mask(mask_file, final_mask)
@@ -103,7 +103,7 @@
     output = h5py.File(filename, 'a')
     if 'HaloMask' in output.keys():
         del output['HaloMask']
-    output.create_dataset('HaloMask', data=na.array(halo_mask))
+    output.create_dataset('HaloMask', data=np.array(halo_mask))
     output.close()
 
 @parallel_root_only
@@ -155,21 +155,21 @@
     # Make boolean mask and cut out halos.
     dx = slice['box_width_fraction'] / pixels
     x = [(q + 0.5) * dx for q in range(pixels)]
-    haloMask = na.ones(shape=(pixels, pixels), dtype=bool)
+    haloMask = np.ones(shape=(pixels, pixels), dtype=bool)
 
     # Cut out any pixel that has any part at all in the circle.
     for q in range(len(all_halo_radius)):
-        dif_xIndex = na.array(int(all_halo_x[q]/dx) -
-                              na.array(range(pixels))) != 0
-        dif_yIndex = na.array(int(all_halo_y[q]/dx) -
-                              na.array(range(pixels))) != 0
+        dif_xIndex = np.array(int(all_halo_x[q]/dx) -
+                              np.array(range(pixels))) != 0
+        dif_yIndex = np.array(int(all_halo_y[q]/dx) -
+                              np.array(range(pixels))) != 0
 
-        xDistance = (na.abs(x - all_halo_x[q]) -
+        xDistance = (np.abs(x - all_halo_x[q]) -
                      (0.5 * dx)) * dif_xIndex
-        yDistance = (na.abs(x - all_halo_y[q]) -
+        yDistance = (np.abs(x - all_halo_y[q]) -
                      (0.5 * dx)) * dif_yIndex
 
-        distance = na.array([na.sqrt(w**2 + xDistance**2)
+        distance = np.array([np.sqrt(w**2 + xDistance**2)
                              for w in yDistance])
         haloMask *= (distance >= all_halo_radius[q])
 
@@ -231,11 +231,11 @@
                                Mpc_units)
             halo_mass.append(halo['TotalMassMsun_%d' % virial_overdensity])
 
-    halo_x = na.array(halo_x)
-    halo_y = na.array(halo_y)
-    halo_depth = na.array(halo_depth)
-    halo_radius = na.array(halo_radius)
-    halo_mass = na.array(halo_mass)
+    halo_x = np.array(halo_x)
+    halo_y = np.array(halo_y)
+    halo_depth = np.array(halo_depth)
+    halo_radius = np.array(halo_radius)
+    halo_mass = np.array(halo_mass)
 
     # Adjust halo centers along line of sight.
     depth_center = slice['projection_center'][slice['projection_axis']]
@@ -247,15 +247,15 @@
     add_left = (halo_depth + halo_radius) > 1 # should be box width
     add_right = (halo_depth - halo_radius) < 0
 
-    halo_depth = na.concatenate([halo_depth,
+    halo_depth = np.concatenate([halo_depth,
                                  (halo_depth[add_left]-1),
                                  (halo_depth[add_right]+1)])
-    halo_x = na.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
-    halo_y = na.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
-    halo_radius = na.concatenate([halo_radius,
+    halo_x = np.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
+    halo_y = np.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
+    halo_radius = np.concatenate([halo_radius,
                                   halo_radius[add_left],
                                   halo_radius[add_right]])
-    halo_mass = na.concatenate([halo_mass,
+    halo_mass = np.concatenate([halo_mass,
                                 halo_mass[add_left],
                                 halo_mass[add_right]])
 
@@ -284,19 +284,19 @@
         del mask
     del halo_depth
 
-    all_halo_x = na.array([])
-    all_halo_y = na.array([])
-    all_halo_radius = na.array([])
-    all_halo_mass = na.array([])
+    all_halo_x = np.array([])
+    all_halo_y = np.array([])
+    all_halo_radius = np.array([])
+    all_halo_mass = np.array([])
 
     # Tile halos of width box fraction is greater than one.
     # Copy original into offset positions to make tiles.
-    for x in range(int(na.ceil(slice['box_width_fraction']))):
-        for y in range(int(na.ceil(slice['box_width_fraction']))):
-            all_halo_x = na.concatenate([all_halo_x, halo_x+x])
-            all_halo_y = na.concatenate([all_halo_y, halo_y+y])
-            all_halo_radius = na.concatenate([all_halo_radius, halo_radius])
-            all_halo_mass = na.concatenate([all_halo_mass, halo_mass])
+    for x in range(int(np.ceil(slice['box_width_fraction']))):
+        for y in range(int(np.ceil(slice['box_width_fraction']))):
+            all_halo_x = np.concatenate([all_halo_x, halo_x+x])
+            all_halo_y = np.concatenate([all_halo_y, halo_y+y])
+            all_halo_radius = np.concatenate([all_halo_radius, halo_radius])
+            all_halo_mass = np.concatenate([all_halo_mass, halo_mass])
 
     del halo_x, halo_y, halo_radius, halo_mass
 
@@ -310,8 +310,8 @@
 
     # Wrap off-edge centers back around to
     # the other side (periodic boundary conditions).
-    all_halo_x[all_halo_x < 0] += na.ceil(slice['box_width_fraction'])
-    all_halo_y[all_halo_y < 0] += na.ceil(slice['box_width_fraction'])
+    all_halo_x[all_halo_x < 0] += np.ceil(slice['box_width_fraction'])
+    all_halo_y[all_halo_y < 0] += np.ceil(slice['box_width_fraction'])
 
     # After shifting, some centers have fractional coverage
     # on both sides of the box.
@@ -319,9 +319,9 @@
 
     # Centers hanging off the right edge.
     add_x_right = all_halo_x + all_halo_radius > \
-      na.ceil(slice['box_width_fraction'])
+      np.ceil(slice['box_width_fraction'])
     add_x_halo_x = all_halo_x[add_x_right]
-    add_x_halo_x -= na.ceil(slice['box_width_fraction'])
+    add_x_halo_x -= np.ceil(slice['box_width_fraction'])
     add_x_halo_y = all_halo_y[add_x_right]
     add_x_halo_radius = all_halo_radius[add_x_right]
     add_x_halo_mass = all_halo_mass[add_x_right]
@@ -330,7 +330,7 @@
     # Centers hanging off the left edge.
     add_x_left = all_halo_x - all_halo_radius < 0
     add2_x_halo_x = all_halo_x[add_x_left]
-    add2_x_halo_x += na.ceil(slice['box_width_fraction'])
+    add2_x_halo_x += np.ceil(slice['box_width_fraction'])
     add2_x_halo_y = all_halo_y[add_x_left]
     add2_x_halo_radius = all_halo_radius[add_x_left]
     add2_x_halo_mass = all_halo_mass[add_x_left]
@@ -338,10 +338,10 @@
 
     # Centers hanging off the top edge.
     add_y_right = all_halo_y + all_halo_radius > \
-      na.ceil(slice['box_width_fraction'])
+      np.ceil(slice['box_width_fraction'])
     add_y_halo_x = all_halo_x[add_y_right]
     add_y_halo_y = all_halo_y[add_y_right]
-    add_y_halo_y -= na.ceil(slice['box_width_fraction'])
+    add_y_halo_y -= np.ceil(slice['box_width_fraction'])
     add_y_halo_radius = all_halo_radius[add_y_right]
     add_y_halo_mass = all_halo_mass[add_y_right]
     del add_y_right
@@ -350,24 +350,24 @@
     add_y_left = all_halo_y - all_halo_radius < 0
     add2_y_halo_x = all_halo_x[add_y_left]
     add2_y_halo_y = all_halo_y[add_y_left]
-    add2_y_halo_y += na.ceil(slice['box_width_fraction'])
+    add2_y_halo_y += np.ceil(slice['box_width_fraction'])
     add2_y_halo_radius = all_halo_radius[add_y_left]
     add2_y_halo_mass = all_halo_mass[add_y_left]
     del add_y_left
 
     # Add the hanging centers back to the projection data.
-    all_halo_x = na.concatenate([all_halo_x,
+    all_halo_x = np.concatenate([all_halo_x,
                                  add_x_halo_x, add2_x_halo_x,
                                  add_y_halo_x, add2_y_halo_x])
-    all_halo_y = na.concatenate([all_halo_y,
+    all_halo_y = np.concatenate([all_halo_y,
                                  add_x_halo_y, add2_x_halo_y,
                                  add_y_halo_y, add2_y_halo_y])
-    all_halo_radius = na.concatenate([all_halo_radius,
+    all_halo_radius = np.concatenate([all_halo_radius,
                                       add_x_halo_radius,
                                       add2_x_halo_radius,
                                       add_y_halo_radius,
                                       add2_y_halo_radius])
-    all_halo_mass = na.concatenate([all_halo_mass,
+    all_halo_mass = np.concatenate([all_halo_mass,
                                     add_x_halo_mass,
                                     add2_x_halo_mass,
                                     add_y_halo_mass,
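
The tile-and-wrap handling above follows one pattern: wrap halo centers back
into the box, then copy any center whose radius overhangs an edge to the
opposite side so that its full circle gets masked. A minimal one-dimensional
sketch of the idea (wrap_and_mirror is a hypothetical helper, not part of yt):

    import numpy as np

    def wrap_and_mirror(x, radius, width):
        # Wrap centers back into [0, width) (periodic boundary conditions).
        x = np.where(x < 0, x + width, x)
        # Copy centers whose radius hangs off either edge to the far side.
        right = x + radius > width
        left = x - radius < 0
        return (np.concatenate([x, x[right] - width, x[left] + width]),
                np.concatenate([radius, radius[right], radius[left]]))

    # e.g. wrap_and_mirror(np.array([0.05, 0.5, 0.97]), np.full(3, 0.1), 1.0)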


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 import os
 
 from yt.funcs import *
@@ -198,7 +198,7 @@
 
         # Calculate projection sizes, and get
         # random projection axes and centers.
-        na.random.seed(self.original_random_seed)
+        np.random.seed(self.original_random_seed)
 
         # For box coherence, keep track of effective depth travelled.
         box_fraction_used = 0.0
@@ -250,9 +250,9 @@
                self.light_cone_solution[q]['box_depth_fraction'] > 1.0):
                 # Random axis and center.
                 self.light_cone_solution[q]['projection_axis'] = \
-                  na.random.randint(0, 3)
+                  np.random.randint(0, 3)
                 self.light_cone_solution[q]['projection_center'] = \
-                  [na.random.random() for i in range(3)]
+                  [np.random.random() for i in range(3)]
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice,
@@ -342,7 +342,7 @@
                                                    njobs=njobs,
                                                    dynamic=dynamic)
             # Collapse cube into final mask.
-            self.halo_mask = na.ones(shape=(self.pixels, self.pixels),
+            self.halo_mask = np.ones(shape=(self.pixels, self.pixels),
                                      dtype=bool)
             for mask in halo_mask_cube:
                 self.halo_mask *= mask
@@ -428,7 +428,7 @@
                 boxSizeProper = self.simulation.box_size / \
                   (self.simulation.hubble_constant * (1.0 + output['redshift']))
                 pixelarea = (boxSizeProper/self.pixels)**2 #in proper cm^2
-                factor = pixelarea/(4.0*na.pi*dL**2)
+                factor = pixelarea/(4.0*np.pi*dL**2)
                 mylog.info("Distance to slice = %e" % dL)
                 frb[field] *= factor #in erg/s/cm^2/Hz on observer's image plane.
 
@@ -461,7 +461,7 @@
                 else:
                     my_image = all_storage[my_slice]['field'] / \
                       all_storage[my_slice]['weight_field']
-                only_on_root(write_image, na.log10(my_image),
+                only_on_root(write_image, np.log10(my_image),
                              "%s_%s.png" % (name, field), cmap_name=cmap_name)
 
             self.projection_stack.append(all_storage[my_slice]['field'])
@@ -491,7 +491,7 @@
 
         # Write image.
         if save_final_image:
-            only_on_root(write_image, na.log10(light_cone_projection),
+            only_on_root(write_image, np.log10(light_cone_projection),
                          "%s_%s.png" % (filename, field), cmap_name=cmap_name)
 
         # Write stack to hdf5 file.
@@ -561,7 +561,7 @@
         box_fraction_used = 0.0
 
         # Seed random number generator with new seed.
-        na.random.seed(int(new_seed))
+        np.random.seed(int(new_seed))
 
         for q, output in enumerate(self.light_cone_solution):
             # It is necessary to make the same number of calls to the random
@@ -578,9 +578,9 @@
                 # Get random projection axis and center.
                 # If recycling, axis will get thrown away since it is used in
                 # creating a unique projection object.
-                newAxis = na.random.randint(0, 3)
+                newAxis = np.random.randint(0, 3)
 
-                newCenter = [na.random.random() for i in range(3)]
+                newCenter = [np.random.random() for i in range(3)]
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice, but with depth center shifted.
@@ -600,8 +600,8 @@
             box_fraction_used += self.light_cone_solution[q]['box_depth_fraction']
 
             # Make list of rectangle corners to calculate common volume.
-            newCube = na.zeros(shape=(len(newCenter), 2))
-            oldCube = na.zeros(shape=(len(newCenter), 2))
+            newCube = np.zeros(shape=(len(newCenter), 2))
+            oldCube = np.zeros(shape=(len(newCenter), 2))
             for w in range(len(newCenter)):
                 if (w == self.master_solution[q]['projection_axis']):
                     oldCube[w] = [self.master_solution[q]['projection_center'][w] -
@@ -630,7 +630,7 @@
                                   0.5 * self.master_solution[q]['box_width_fraction']]
 
             my_volume += common_volume(oldCube, newCube,
-                                           periodic=na.array([[0, 1],
+                                           periodic=np.array([[0, 1],
                                                               [0, 1],
                                                               [0, 1]]))
             total_volume += output['box_depth_fraction'] * \
@@ -691,7 +691,7 @@
         "Save the light cone projection stack as a 3d array in an hdf5 file."
 
         # Make list of redshifts to include as a dataset attribute.
-        redshiftList = na.array([my_slice['redshift'] \
+        redshiftList = np.array([my_slice['redshift'] \
                                  for my_slice in self.light_cone_solution])
 
         field_node = "%s_%s" % (field, weight_field)
@@ -727,16 +727,16 @@
 
         if write_data:
             mylog.info("Saving %s to %s." % (field_node, filename))
-            self.projection_stack = na.array(self.projection_stack)
+            self.projection_stack = np.array(self.projection_stack)
             field_dataset = output.create_dataset(field_node,
                                                   data=self.projection_stack)
             field_dataset.attrs['redshifts'] = redshiftList
             field_dataset.attrs['observer_redshift'] = \
-              na.float(self.observer_redshift)
+              np.float(self.observer_redshift)
             field_dataset.attrs['field_of_view_in_arcminutes'] = \
-              na.float(self.field_of_view_in_arcminutes)
+              np.float(self.field_of_view_in_arcminutes)
             field_dataset.attrs['image_resolution_in_arcseconds'] = \
-              na.float(self.image_resolution_in_arcseconds)
+              np.float(self.image_resolution_in_arcseconds)
 
         if (len(self.projection_weight_field_stack) > 0):
             if node_exists:
@@ -754,16 +754,16 @@
             if write_data:
                 mylog.info("Saving %s to %s." % (weight_field_node, filename))
                 self.projection_weight_field_stack = \
-                  na.array(self.projection_weight_field_stack)
+                  np.array(self.projection_weight_field_stack)
                 weight_field_dataset = \
                   output.create_dataset(weight_field_node,
                                         data=self.projection_weight_field_stack)
                 weight_field_dataset.attrs['redshifts'] = redshiftList
                 weight_field_dataset.attrs['observer_redshift'] = \
-                  na.float(self.observer_redshift)
+                  np.float(self.observer_redshift)
                 weight_field_dataset.attrs['field_of_view_in_arcminutes'] = \
-                  na.float(self.field_of_view_in_arcminutes)
+                  np.float(self.field_of_view_in_arcminutes)
                 weight_field_dataset.attrs['image_resolution_in_arcseconds'] = \
-                  na.float(self.image_resolution_in_arcseconds)
+                  np.float(self.image_resolution_in_arcseconds)
 
         output.close()
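
A note on the seeding above: the whole light cone solution is reproducible
because every random choice flows from a single call to np.random.seed, so
reseeding with the same value regenerates the identical sequence of axes and
centers. A minimal sketch (the seed value is illustrative):

    import numpy as np

    np.random.seed(452)                   # same seed -> same solution
    axis = np.random.randint(0, 3)        # random projection axis: 0, 1, or 2
    center = [np.random.random() for _ in range(3)]   # random center

    np.random.seed(452)                   # reseed: the draws repeat exactly
    assert axis == np.random.randint(0, 3)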


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import copy
 
 from yt.funcs import *
@@ -98,15 +98,15 @@
     original_weight_field = copy.deepcopy(proj['weight_field'])
 
     # Copy original into offset positions to make tiles.
-    for x in range(int(na.ceil(lightConeSlice['box_width_fraction']))):
-        for y in range(int(na.ceil(lightConeSlice['box_width_fraction']))):
+    for x in range(int(np.ceil(lightConeSlice['box_width_fraction']))):
+        for y in range(int(np.ceil(lightConeSlice['box_width_fraction']))):
             if ((x + y) > 0):
-                proj['px'] = na.concatenate([proj['px'], original_px+x])
-                proj['py'] = na.concatenate([proj['py'], original_py+y])
-                proj['pdx'] = na.concatenate([proj['pdx'], original_pdx])
-                proj['pdy'] = na.concatenate([proj['pdy'], original_pdy])
-                proj[field] = na.concatenate([proj[field], original_field])
-                proj['weight_field'] = na.concatenate([proj['weight_field'],
+                proj['px'] = np.concatenate([proj['px'], original_px+x])
+                proj['py'] = np.concatenate([proj['py'], original_py+y])
+                proj['pdx'] = np.concatenate([proj['pdx'], original_pdx])
+                proj['pdy'] = np.concatenate([proj['pdy'], original_pdy])
+                proj[field] = np.concatenate([proj[field], original_field])
+                proj['weight_field'] = np.concatenate([proj['weight_field'],
                                                        original_weight_field])
 
     # Delete originals.
@@ -129,17 +129,17 @@
     proj['py'] -= offset[1]
 
     # Wrap off-edge cells back around to the other side (periodic boundary conditions).
-    proj['px'][proj['px'] < 0] += na.ceil(lightConeSlice['box_width_fraction'])
-    proj['py'][proj['py'] < 0] += na.ceil(lightConeSlice['box_width_fraction'])
+    proj['px'][proj['px'] < 0] += np.ceil(lightConeSlice['box_width_fraction'])
+    proj['py'][proj['py'] < 0] += np.ceil(lightConeSlice['box_width_fraction'])
 
     # After shifting, some cells have fractional coverage on both sides of the box.
     # Find those cells and make copies to be placed on the other side.
 
     # Cells hanging off the right edge.
     add_x_right = proj['px'] + 0.5 * proj['pdx'] > \
-      na.ceil(lightConeSlice['box_width_fraction'])
+      np.ceil(lightConeSlice['box_width_fraction'])
     add_x_px = proj['px'][add_x_right]
-    add_x_px -= na.ceil(lightConeSlice['box_width_fraction'])
+    add_x_px -= np.ceil(lightConeSlice['box_width_fraction'])
     add_x_py = proj['py'][add_x_right]
     add_x_pdx = proj['pdx'][add_x_right]
     add_x_pdy = proj['pdy'][add_x_right]
@@ -150,7 +150,7 @@
     # Cells hanging off the left edge.
     add_x_left = proj['px'] - 0.5 * proj['pdx'] < 0
     add2_x_px = proj['px'][add_x_left]
-    add2_x_px += na.ceil(lightConeSlice['box_width_fraction'])
+    add2_x_px += np.ceil(lightConeSlice['box_width_fraction'])
     add2_x_py = proj['py'][add_x_left]
     add2_x_pdx = proj['pdx'][add_x_left]
     add2_x_pdy = proj['pdy'][add_x_left]
@@ -160,10 +160,10 @@
 
     # Cells hanging off the top edge.
     add_y_right = proj['py'] + 0.5 * proj['pdy'] > \
-      na.ceil(lightConeSlice['box_width_fraction'])
+      np.ceil(lightConeSlice['box_width_fraction'])
     add_y_px = proj['px'][add_y_right]
     add_y_py = proj['py'][add_y_right]
-    add_y_py -= na.ceil(lightConeSlice['box_width_fraction'])
+    add_y_py -= np.ceil(lightConeSlice['box_width_fraction'])
     add_y_pdx = proj['pdx'][add_y_right]
     add_y_pdy = proj['pdy'][add_y_right]
     add_y_field = proj[field][add_y_right]
@@ -174,7 +174,7 @@
     add_y_left = proj['py'] - 0.5 * proj['pdy'] < 0
     add2_y_px = proj['px'][add_y_left]
     add2_y_py = proj['py'][add_y_left]
-    add2_y_py += na.ceil(lightConeSlice['box_width_fraction'])
+    add2_y_py += np.ceil(lightConeSlice['box_width_fraction'])
     add2_y_pdx = proj['pdx'][add_y_left]
     add2_y_pdy = proj['pdy'][add_y_left]
     add2_y_field = proj[field][add_y_left]
@@ -182,17 +182,17 @@
     del add_y_left
 
     # Add the hanging cells back to the projection data.
-    proj['px'] = na.concatenate([proj['px'], add_x_px, add_y_px,
+    proj['px'] = np.concatenate([proj['px'], add_x_px, add_y_px,
                                  add2_x_px, add2_y_px])
-    proj['py'] = na.concatenate([proj['py'], add_x_py, add_y_py,
+    proj['py'] = np.concatenate([proj['py'], add_x_py, add_y_py,
                                  add2_x_py, add2_y_py])
-    proj['pdx'] = na.concatenate([proj['pdx'], add_x_pdx, add_y_pdx,
+    proj['pdx'] = np.concatenate([proj['pdx'], add_x_pdx, add_y_pdx,
                                   add2_x_pdx, add2_y_pdx])
-    proj['pdy'] = na.concatenate([proj['pdy'], add_x_pdy, add_y_pdy,
+    proj['pdy'] = np.concatenate([proj['pdy'], add_x_pdy, add_y_pdy,
                                   add2_x_pdy, add2_y_pdy])
-    proj[field] = na.concatenate([proj[field], add_x_field, add_y_field,
+    proj[field] = np.concatenate([proj[field], add_x_field, add_y_field,
                                   add2_x_field, add2_y_field])
-    proj['weight_field'] = na.concatenate([proj['weight_field'],
+    proj['weight_field'] = np.concatenate([proj['weight_field'],
                                            add_x_weight_field, add_y_weight_field,
                                            add2_x_weight_field, add2_y_weight_field])
 


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
@@ -24,7 +24,7 @@
 """
 
 import copy
-import numpy as na
+import numpy as np
 import random as rand
 import sys
 
@@ -128,7 +128,7 @@
         rand.seed(seed)
         state = rand.getstate()
 
-    fail_digits = str(int(na.log10(failures))+1)
+    fail_digits = str(int(np.log10(failures))+1)
 
     while (len(unique_seeds) < solutions):
         # Create new random seed.
@@ -221,7 +221,7 @@
         mylog.error("Light cone solutions do not have equal volumes, will use the smaller one.")
 
     for q in range(len(solution1)):
-        cube1 = na.zeros(shape=(len(solution1[q]['projection_center']), 2))
+        cube1 = np.zeros(shape=(len(solution1[q]['projection_center']), 2))
         volume1 = 1.0
         for w in range(len(cube1)):
             if (w == solution1[q]['projection_axis']):
@@ -232,7 +232,7 @@
             cube1[w] = [solution1[q]['projection_center'][w] - 0.5 * width,
                         solution1[q]['projection_center'][w] + 0.5 * width]
 
-        cube2 = na.zeros(shape=(len(solution2[q]['projection_center']), 2))
+        cube2 = np.zeros(shape=(len(solution2[q]['projection_center']), 2))
         volume2 = 1.0
         for w in range(len(cube2)):
             if (w == solution2[q]['projection_axis']):
@@ -245,7 +245,7 @@
 
         total_volume += min(volume1, volume2)
         my_volume += common_volume(cube1, cube2,
-                                   periodic=na.array([[0, 1],
+                                   periodic=np.array([[0, 1],
                                                       [0, 1],
                                                       [0, 1]]))
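
The common_volume calls above measure how much of two axis-aligned cubes
overlaps; setting aside the periodic images they account for, the core
computation is a per-axis clamp. A non-periodic sketch (box_overlap_volume
is a hypothetical helper, not yt's common_volume):

    import numpy as np

    def box_overlap_volume(cube1, cube2):
        # Each cube has shape (3, 2): [min, max] along each axis.
        lo = np.maximum(cube1[:, 0], cube2[:, 0])
        hi = np.minimum(cube1[:, 1], cube2[:, 1])
        return np.prod(np.clip(hi - lo, 0.0, None))

    # Two unit cubes offset by 0.5 along x overlap in half a unit volume:
    # box_overlap_volume(np.array([[0., 1.], [0., 1.], [0., 1.]]),
    #                    np.array([[.5, 1.5], [0., 1.], [0., 1.]]))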
 


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -124,7 +124,7 @@
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
-        na.random.seed(seed)
+        np.random.seed(seed)
 
         # For box coherence, keep track of effective depth travelled.
         box_fraction_used = 0.0
@@ -162,9 +162,9 @@
                     (box_fraction_used +
                      self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
                 # Random start point
-                self.light_ray_solution[q]['start'] = na.random.random(3)
-                theta = na.pi * na.random.random()
-                phi = 2 * na.pi * na.random.random()
+                self.light_ray_solution[q]['start'] = np.random.random(3)
+                theta = np.pi * np.random.random()
+                phi = 2 * np.pi * np.random.random()
                 box_fraction_used = 0.0
             else:
                 # Use end point of previous segment and same theta and phi.
@@ -174,9 +174,9 @@
             self.light_ray_solution[q]['end'] = \
               self.light_ray_solution[q]['start'] + \
                 self.light_ray_solution[q]['traversal_box_fraction'] * \
-                na.array([na.cos(phi) * na.sin(theta),
-                          na.sin(phi) * na.sin(theta),
-                          na.cos(theta)])
+                np.array([np.cos(phi) * np.sin(theta),
+                          np.sin(phi) * np.sin(theta),
+                          np.cos(theta)])
             box_fraction_used += \
               self.light_ray_solution[q]['traversal_box_fraction']
 
@@ -365,30 +365,30 @@
             sub_data = {}
             sub_data['segment_redshift'] = my_segment['redshift']
             for field in all_fields:
-                sub_data[field] = na.array([])
+                sub_data[field] = np.array([])
 
             # Get data for all subsegments in segment.
             for sub_segment in sub_segments:
                 mylog.info("Getting subsegment: %s to %s." %
                            (list(sub_segment[0]), list(sub_segment[1])))
                 sub_ray = pf.h.ray(sub_segment[0], sub_segment[1])
-                sub_data['dl'] = na.concatenate([sub_data['dl'],
+                sub_data['dl'] = np.concatenate([sub_data['dl'],
                                                  (sub_ray['dts'] *
                                                   vector_length(sub_segment[0],
                                                                 sub_segment[1]))])
                 for field in fields:
-                    sub_data[field] = na.concatenate([sub_data[field],
+                    sub_data[field] = np.concatenate([sub_data[field],
                                                       (sub_ray[field])])
 
                 if get_los_velocity:
                     line_of_sight = sub_segment[1] - sub_segment[0]
                     line_of_sight /= ((line_of_sight**2).sum())**0.5
-                    sub_vel = na.array([sub_ray['x-velocity'],
+                    sub_vel = np.array([sub_ray['x-velocity'],
                                         sub_ray['y-velocity'],
                                         sub_ray['z-velocity']])
                     sub_data['los_velocity'] = \
-                      na.concatenate([sub_data['los_velocity'],
-                                      (na.rollaxis(sub_vel, 1) *
+                      np.concatenate([sub_data['los_velocity'],
+                                      (np.rollaxis(sub_vel, 1) *
                                        line_of_sight).sum(axis=1)])
                     del sub_vel
 
@@ -470,20 +470,20 @@
         if fields is None: fields = []
 
         # Create position array from halo list.
-        halo_centers = na.array(map(lambda halo: halo['center'], halo_list))
-        halo_field_values = dict([(field, na.array(map(lambda halo: halo[field],
+        halo_centers = np.array(map(lambda halo: halo['center'], halo_list))
+        halo_field_values = dict([(field, np.array(map(lambda halo: halo[field],
                                                        halo_list))) \
                                   for field in fields])
 
-        nearest_distance = na.zeros(data['x'].shape)
-        field_data = dict([(field, na.zeros(data['x'].shape)) \
+        nearest_distance = np.zeros(data['x'].shape)
+        field_data = dict([(field, np.zeros(data['x'].shape)) \
                            for field in fields])
         for index in xrange(nearest_distance.size):
-            nearest = na.argmin(periodic_distance(na.array([data['x'][index],
+            nearest = np.argmin(periodic_distance(np.array([data['x'][index],
                                                             data['y'][index],
                                                             data['z'][index]]),
                                                   halo_centers))
-            nearest_distance[index] = periodic_distance(na.array([data['x'][index],
+            nearest_distance[index] = periodic_distance(np.array([data['x'][index],
                                                                   data['y'][index],
                                                                   data['z'][index]]),
                                                         halo_centers[nearest])
@@ -532,41 +532,41 @@
         for field in [field for field in datum.keys()
                       if field not in exceptions]:
             if field in new_data:
-                new_data[field] = na.concatenate([new_data[field], datum[field]])
+                new_data[field] = np.concatenate([new_data[field], datum[field]])
             else:
-                new_data[field] = na.copy(datum[field])
+                new_data[field] = np.copy(datum[field])
     return new_data
 
 def vector_length(start, end):
     "Calculate vector length."
 
-    return na.sqrt(na.power((end - start), 2).sum())
+    return np.sqrt(np.power((end - start), 2).sum())
 
 def periodic_distance(coord1, coord2):
     "Calculate the length of the shortest vector between two points in a periodic domain."
     dif = coord1 - coord2
 
-    dim = na.ones(coord1.shape,dtype=int)
+    dim = np.ones(coord1.shape,dtype=int)
     def periodic_bind(num):
-        pos = na.abs(num % dim)
-        neg = na.abs(num % -dim)
-        return na.min([pos,neg],axis=0)
+        pos = np.abs(num % dim)
+        neg = np.abs(num % -dim)
+        return np.min([pos,neg],axis=0)
 
     dif = periodic_bind(dif)
-    return na.sqrt((dif * dif).sum(axis=-1))
+    return np.sqrt((dif * dif).sum(axis=-1))
 
 def periodic_ray(start, end, left=None, right=None):
     "Break up periodic ray into non-periodic segments."
 
     if left is None:
-        left = na.zeros(start.shape)
+        left = np.zeros(start.shape)
     if right is None:
-        right = na.ones(start.shape)
+        right = np.ones(start.shape)
     dim = right - left
 
     vector = end - start
-    wall = na.zeros(start.shape)
-    close = na.zeros(start.shape, dtype=object)
+    wall = np.zeros(start.shape)
+    close = np.zeros(start.shape, dtype=object)
 
     left_bound = vector < 0
     right_bound = vector > 0
@@ -574,15 +574,15 @@
     bound = vector != 0.0
 
     wall[left_bound] = left[left_bound]
-    close[left_bound] = na.max
+    close[left_bound] = np.max
     wall[right_bound] = right[right_bound]
-    close[right_bound] = na.min
-    wall[no_bound] = na.inf
-    close[no_bound] = na.min
+    close[right_bound] = np.min
+    wall[no_bound] = np.inf
+    close[no_bound] = np.min
 
     segments = []
-    this_start = na.copy(start)
-    this_end = na.copy(end)
+    this_start = np.copy(start)
+    this_end = np.copy(end)
     t = 0.0
     tolerance = 1e-6
 
@@ -596,14 +596,14 @@
             this_start[hit_right] -= dim[hit_right]
             this_end[hit_right] -= dim[hit_right]
 
-        nearest = na.array([close[q]([this_end[q], wall[q]]) \
+        nearest = np.array([close[q]([this_end[q], wall[q]]) \
                                 for q in range(start.size)])
         dt = ((nearest - this_start) / vector)[bound].min()
         now = this_start + vector * dt
-        close_enough = na.abs(now - nearest) < 1e-10
+        close_enough = np.abs(now - nearest) < 1e-10
         now[close_enough] = nearest[close_enough]
-        segments.append([na.copy(this_start), na.copy(now)])
-        this_start = na.copy(now)
+        segments.append([np.copy(this_start), np.copy(now)])
+        this_start = np.copy(now)
         t += dt
 
     return segments
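
Each ray segment above is just a start point plus a traversal length times a
unit direction built from the random angles theta and phi (the standard
spherical-to-Cartesian conversion). A minimal sketch with illustrative values:

    import numpy as np

    np.random.seed(0)
    start = np.random.random(3)            # random start in the unit box
    theta = np.pi * np.random.random()     # polar angle
    phi = 2 * np.pi * np.random.random()   # azimuthal angle
    fraction = 0.3                         # assumed traversal_box_fraction
    end = start + fraction * np.array([np.cos(phi) * np.sin(theta),
                                       np.sin(phi) * np.sin(theta),
                                       np.cos(theta)])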


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -31,7 +31,7 @@
 import h5py
 import itertools
 import math
-import numpy as na
+import numpy as np
 import random
 import sys
 import os.path as path
@@ -123,13 +123,13 @@
         cy = self["particle_position_y"]
         cz = self["particle_position_z"]
         if isinstance(self, FOFHalo):
-            c_vec = na.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
+            c_vec = np.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
         else:
             c_vec = self.maximum_density_location() - self.pf.domain_center
         cx = (cx - c_vec[0])
         cy = (cy - c_vec[1])
         cz = (cz - c_vec[2])
-        com = na.array([v - na.floor(v) for v in [cx, cy, cz]])
+        com = np.array([v - np.floor(v) for v in [cx, cy, cz]])
         return (com * pm).sum(axis=1) / pm.sum() + c_vec
 
     def maximum_density(self):
@@ -158,7 +158,7 @@
         """
         if self.max_dens_point is not None:
             return self.max_dens_point[1:]
-        return na.array([
+        return np.array([
                 self._max_dens[self.id][1],
                 self._max_dens[self.id][2],
                 self._max_dens[self.id][3]])
@@ -193,7 +193,7 @@
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
         vz = (self["particle_velocity_z"] * pm).sum()
-        return na.array([vx, vy, vz]) / pm.sum()
+        return np.array([vx, vy, vz]) / pm.sum()
 
     def rms_velocity(self):
         r"""Returns the mass-weighted RMS velocity for the halo
@@ -216,8 +216,8 @@
         vy = (self["particle_velocity_y"] - bv[1]) * pm / sm
         vz = (self["particle_velocity_z"] - bv[2]) * pm / sm
         s = vx ** 2. + vy ** 2. + vz ** 2.
-        ms = na.mean(s)
-        return na.sqrt(ms) * pm.size
+        ms = np.mean(s)
+        return np.sqrt(ms) * pm.size
 
     def maximum_radius(self, center_of_mass=True):
         r"""Returns the maximum radius in the halo for all particles,
@@ -246,13 +246,13 @@
             center = self.center_of_mass()
         else:
             center = self.maximum_density_location()
-        rx = na.abs(self["particle_position_x"] - center[0])
-        ry = na.abs(self["particle_position_y"] - center[1])
-        rz = na.abs(self["particle_position_z"] - center[2])
+        rx = np.abs(self["particle_position_x"] - center[0])
+        ry = np.abs(self["particle_position_y"] - center[1])
+        rz = np.abs(self["particle_position_z"] - center[2])
         DW = self.data.pf.domain_right_edge - self.data.pf.domain_left_edge
-        r = na.sqrt(na.minimum(rx, DW[0] - rx) ** 2.0
-                + na.minimum(ry, DW[1] - ry) ** 2.0
-                + na.minimum(rz, DW[2] - rz) ** 2.0)
+        r = np.sqrt(np.minimum(rx, DW[0] - rx) ** 2.0
+                + np.minimum(ry, DW[1] - ry) ** 2.0
+                + np.minimum(rz, DW[2] - rz) ** 2.0)
         return r.max()
 
     def __getitem__(self, key):
@@ -393,7 +393,7 @@
         self.virial_info(bins=bins)
         over = (self.overdensity > virial_overdensity)
         if (over == True).any():
-            vir_bin = max(na.arange(bins + 1)[over])
+            vir_bin = max(np.arange(bins + 1)[over])
             return vir_bin
         else:
             return -1
@@ -419,8 +419,8 @@
         Msun2g = mass_sun_cgs
         rho_crit = rho_crit * ((1.0 + z) ** 3.0)
         # Get some pertinent information about the halo.
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
-        dist = na.empty(thissize, dtype='float64')
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
+        dist = np.empty(thissize, dtype='float64')
         cen = self.center_of_mass()
         mark = 0
         # Find the distances to the particles. I don't like this much, but I
@@ -432,15 +432,15 @@
             mark += 1
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
-        self.radial_bins = na.logspace(math.log10(min(dist) * .99 + TINY),
+        self.radial_bins = np.logspace(math.log10(min(dist) * .99 + TINY),
             math.log10(max(dist) * 1.01 + 2 * TINY), num=self.bin_count + 1)
         # Find out which bin each particle goes into, and add the particle
         # mass to that bin.
-        inds = na.digitize(dist, self.radial_bins) - 1
+        inds = np.digitize(dist, self.radial_bins) - 1
         if self["particle_position_x"].size > 1:
-            for index in na.unique(inds):
+            for index in np.unique(inds):
                 self.mass_bins[index] += \
-                na.sum(self["ParticleMassMsun"][inds == index])
+                np.sum(self["ParticleMassMsun"][inds == index])
         # Now forward sum the masses in the bins.
         for i in xrange(self.bin_count):
             self.mass_bins[i + 1] += self.mass_bins[i]
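
The binning pattern above (log-spaced radial bins, np.digitize to assign each
particle to a bin, then a forward sum to turn bin masses into enclosed mass)
can be sketched on toy data as follows:

    import numpy as np

    r = np.random.random(1000) + 1e-3      # toy particle radii
    mass = np.ones(1000)                   # toy particle masses
    nbins = 50
    bins = np.logspace(np.log10(r.min() * 0.99),
                       np.log10(r.max() * 1.01), num=nbins + 1)
    inds = np.digitize(r, bins) - 1        # bin index for each particle
    mass_bins = np.zeros(nbins + 1)
    for index in np.unique(inds):
        mass_bins[index] += np.sum(mass[inds == index])
    enclosed = np.cumsum(mass_bins)        # mass enclosed within each radius
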
@@ -450,12 +450,12 @@
         (self.radial_bins * cm)**3.0)
         
     def _get_ellipsoid_parameters_basic(self):
-        na.seterr(all='ignore')
+        np.seterr(all='ignore')
         # check if there are 4 particles to form an ellipsoid
         # neglecting to check if the 4 particles in the same plane,
         # that is almost certainly never to occur,
         # will deal with it later if it ever comes up
-        if na.size(self["particle_position_x"]) < 4:
+        if np.size(self["particle_position_x"]) < 4:
             mylog.warning("Too few particles for ellipsoid parameters.")
             return (0, 0, 0, 0, 0, 0, 0)
         # Calculate the parameters that describe the ellipsoid of
@@ -466,19 +466,19 @@
 		    self["particle_position_y"],
 		    self["particle_position_z"]]
         # Locate the furthest particle from com, its vector length and index
-	DW = na.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
+	DW = np.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
 	position = [position[0] - com[0],
 		    position[1] - com[1],
 		    position[2] - com[2]]
 	# different cases of particles being on other side of boundary
-	for axis in range(na.size(DW)):
-	    cases = na.array([position[axis],
+	for axis in range(np.size(DW)):
+	    cases = np.array([position[axis],
 	  		      position[axis] + DW[axis],
 			      position[axis] - DW[axis]])        
             # pick out the smallest absolute distance from com
-            position[axis] = na.choose(na.abs(cases).argmin(axis=0), cases)
+            position[axis] = np.choose(np.abs(cases).argmin(axis=0), cases)
 	# find the furthest particle's index
-	r = na.sqrt(position[0]**2 +
+	r = np.sqrt(position[0]**2 +
 		    position[1]**2 +
 		    position[2]**2)
         A_index = r.argmax()
@@ -490,24 +490,24 @@
         # designate the e0 unit vector
         e0_vector = A_vector / mag_A
         # locate the tB particle position by finding the max B
-	e0_vector_copy = na.empty((na.size(position[0]), 3), dtype='float64')
+	e0_vector_copy = np.empty((np.size(position[0]), 3), dtype='float64')
         for i in range(3):
             e0_vector_copy[:, i] = e0_vector[i]
-        rr = na.array([position[0],
+        rr = np.array([position[0],
 		       position[1],
 		       position[2]]).T # Similar to tB_vector in old code.
-        tC_vector = na.cross(e0_vector_copy, rr)
+        tC_vector = np.cross(e0_vector_copy, rr)
         te2 = tC_vector.copy()
         for dim in range(3):
-            te2[:,dim] *= na.sum(tC_vector**2., axis = 1)**(-0.5)
-        te1 = na.cross(te2, e0_vector_copy)
-        length = na.abs(-na.sum(rr * te1, axis = 1) * \
-            (1. - na.sum(rr * e0_vector_copy, axis = 1)**2. * \
+            te2[:,dim] *= np.sum(tC_vector**2., axis = 1)**(-0.5)
+        te1 = np.cross(te2, e0_vector_copy)
+        length = np.abs(-np.sum(rr * te1, axis = 1) * \
+            (1. - np.sum(rr * e0_vector_copy, axis = 1)**2. * \
             mag_A**-2.)**(-0.5))
         # Apparently this sometimes happens: the NaNs are turned into infs,
         # which messes up the nanargmax below.
-        length[length == na.inf] = 0.
-        tB_index = na.nanargmax(length) # ignores NaNs created above.
+        length[length == np.inf] = 0.
+        tB_index = np.nanargmax(length) # ignores NaNs created above.
         mag_B = length[tB_index]
         e1_vector = te1[tB_index]
         e2_vector = te2[tB_index]
@@ -518,24 +518,24 @@
             temp_e0[:,dim] = e0_vector[dim]
             temp_e1[:,dim] = e1_vector[dim]
             temp_e2[:,dim] = e2_vector[dim]
-        length = na.abs(na.sum(rr * temp_e2, axis = 1) * (1 - \
-            na.sum(rr * temp_e0, axis = 1)**2. * mag_A**-2. - \
-            na.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2)**(-0.5))
-        length[length == na.inf] = 0.
-        tC_index = na.nanargmax(length)
+        length = np.abs(np.sum(rr * temp_e2, axis = 1) * (1 - \
+            np.sum(rr * temp_e0, axis = 1)**2. * mag_A**-2. - \
+            np.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2)**(-0.5))
+        length[length == np.inf] = 0.
+        tC_index = np.nanargmax(length)
         mag_C = length[tC_index]
         # tilt is calculated from the rotation about x axis
         # needed to align e1 vector with the y axis
         # after e0 is aligned with x axis
         # find the t1 angle needed to rotate about z axis to align e0 to x
-        t1 = na.arctan(e0_vector[1] / e0_vector[0])
+        t1 = np.arctan(e0_vector[1] / e0_vector[0])
         RZ = get_rotation_matrix(-t1, (0, 0, 1)).transpose()
         r1 = (e0_vector * RZ).sum(axis = 1)
         # find the t2 angle needed to rotate about y axis to align e0 to x
-        t2 = na.arctan(-r1[2] / r1[0])
+        t2 = np.arctan(-r1[2] / r1[0])
         RY = get_rotation_matrix(-t2, (0, 1, 0)).transpose()
-        r2 = na.dot(RY, na.dot(RZ, e1_vector))
-        tilt = na.arctan(r2[2]/r2[1])
+        r2 = np.dot(RY, np.dot(RZ, e1_vector))
+        tilt = np.arctan(r2[2]/r2[1])
         return (mag_A, mag_B, mag_C, e0_vector[0], e0_vector[1],
             e0_vector[2], tilt)
 
@@ -572,11 +572,11 @@
 
         #Halo.__init__(self,halo_list,index,
         self.size=Np 
-        self.CoM=na.array([X,Y,Z])
+        self.CoM=np.array([X,Y,Z])
         self.max_dens_point=-1
         self.group_total_mass=-1
         self.max_radius=Rvir
-        self.bulk_vel=na.array([VX,VY,VZ])*1e5
+        self.bulk_vel=np.array([VX,VY,VZ])*1e5
         self.rms_vel=-1
         self.group_total_mass = -1 #not implemented 
     
@@ -651,7 +651,7 @@
         basic_parameters = self._get_ellipsoid_parameters_basic()
         toreturn = [self.center_of_mass()]
         updated = [basic_parameters[0], basic_parameters[1],
-            basic_parameters[2], na.array([basic_parameters[3],
+            basic_parameters[2], np.array([basic_parameters[3],
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
@@ -704,7 +704,7 @@
         self.bin_count = bins
         period = self.data.pf.domain_right_edge - \
             self.data.pf.domain_left_edge
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
         cen = self.center_of_mass()
         # Cosmology
         h = self.data.pf.hubble_constant
@@ -716,7 +716,7 @@
         # If I own some of this halo operate on the particles.
         if self.indices is not None:
             # Get some pertinent information about the halo.
-            dist = na.empty(self.indices.size, dtype='float64')
+            dist = np.empty(self.indices.size, dtype='float64')
             mark = 0
             # Find the distances to the particles.
             # I don't like this much, but I
@@ -737,15 +737,15 @@
         dist_max = self.comm.mpi_allreduce(dist_max, op='max')
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
-        self.radial_bins = na.logspace(math.log10(dist_min * .99 + TINY),
+        self.radial_bins = np.logspace(math.log10(dist_min * .99 + TINY),
             math.log10(dist_max * 1.01 + 2 * TINY), num=self.bin_count + 1)
         if self.indices is not None and self.indices.size > 1:
             # Find out which bin each particle goes into, and add the particle
             # mass to that bin.
-            inds = na.digitize(dist, self.radial_bins) - 1
-            for index in na.unique(inds):
+            inds = np.digitize(dist, self.radial_bins) - 1
+            for index in np.unique(inds):
                 self.mass_bins[index] += \
-                    na.sum(self["ParticleMassMsun"][inds == index])
+                    np.sum(self["ParticleMassMsun"][inds == index])
             # Now forward sum the masses in the bins.
             for i in xrange(self.bin_count):
                 self.mass_bins[i + 1] += self.mass_bins[i]
@@ -831,7 +831,7 @@
         self.saved_fields = {}
         self.particle_mask = None
         self.ds_sort = None
-        self.indices = na.array([])  # Never used for a LoadedHalo.
+        self.indices = np.array([])  # Never used for a LoadedHalo.
         # A supplementary data dict.
         if supp is None:
             self.supp = {}
@@ -871,7 +871,7 @@
                     # The result of searchsorted is an array with the positions
                     # of the indexes in pid as they are in sp_pid. This is
                     # because each element of pid is in sp_pid only once.
-                    self.particle_mask = na.searchsorted(sp_pid, pid)
+                    self.particle_mask = np.searchsorted(sp_pid, pid)
                 # We won't store this field below in saved_fields because
                 # that would mean keeping two copies of it, one in the yt
                 # machinery and one here.
@@ -890,9 +890,9 @@
             return None
         elif field == 'particle_index' or field == 'particle_type':
             # the only integer field
-            field_data = na.empty(size, dtype='int64')
+            field_data = np.empty(size, dtype='int64')
         else:
-            field_data = na.empty(size, dtype='float64')
+            field_data = np.empty(size, dtype='float64')
         f.close()
         # Apparently, there's a bug in h5py that was keeping the file pointer
         # f closed, even though it's re-opened below. This del seems to fix
@@ -943,7 +943,7 @@
         basic_parameters = self._get_ellipsoid_parameters_basic_loadedhalo()
         toreturn = [self.center_of_mass()]
         updated = [basic_parameters[0], basic_parameters[1],
-            basic_parameters[2], na.array([basic_parameters[3],
+            basic_parameters[2], np.array([basic_parameters[3],
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
@@ -1025,7 +1025,7 @@
         self.tilt = tilt
         self.bin_count = None
         self.overdensity = None
-        self.indices = na.array([])  # Never used for a LoadedHalo.
+        self.indices = np.array([])  # Never used for a LoadedHalo.
         # A supplementary data dict.
         if supp is None:
             self.supp = {}
@@ -1084,7 +1084,7 @@
                 self.particle_fields[field] = \
                     self._data_source[field][ii].astype('float64')
             del self._data_source[field]
-        self._base_indices = na.arange(tot_part)[ii]
+        self._base_indices = np.arange(tot_part)[ii]
         gc.collect()
 
     def _get_dm_indices(self):
@@ -1099,10 +1099,10 @@
             return slice(None)
 
     def _parse_output(self):
-        unique_ids = na.unique(self.tags)
-        counts = na.bincount(self.tags + 1)
-        sort_indices = na.argsort(self.tags)
-        grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
+        unique_ids = np.unique(self.tags)
+        counts = np.bincount(self.tags + 1)
+        sort_indices = np.argsort(self.tags)
+        grab_indices = np.indices(self.tags.shape).ravel()[sort_indices]
         dens = self.densities[sort_indices]
         cp = 0
         for i in unique_ids:
@@ -1112,7 +1112,7 @@
                 continue
             group_indices = grab_indices[cp:cp_c]
             self._groups.append(self._halo_class(self, i, group_indices))
-            md_i = na.argmax(dens[cp:cp_c])
+            md_i = np.argmax(dens[cp:cp_c])
             px, py, pz = \
                 [self.particle_fields['particle_position_%s' % ax][group_indices]
                                             for ax in 'xyz']
@@ -1201,7 +1201,7 @@
         """
         # Set up a vector to multiply other
         # vectors by to project along proj_dim
-        vec = na.array([1., 1., 1.])
+        vec = np.array([1., 1., 1.])
         vec[proj_dim] = 0.
         period = self.pf.domain_right_edge - self.pf.domain_left_edge
         period = period * vec
@@ -1367,9 +1367,9 @@
         splits = filter(lambda x: len(x.strip()) > 0 ,line.split(' '))
         for num in splits:
             if 'nan' not in num:
-                formats += na.array(eval(num)).dtype,
+                formats += np.array(eval(num)).dtype,
             else:
-                formats += na.dtype('float'),
+                formats += np.dtype('float'),
         assert len(formats) == len(names)
 
         #Jc = 1.98892e33/pf['mpchcm']*1e5
@@ -1384,7 +1384,7 @@
                     Rs=1.0/pf['kpchcm'],
                     JX=Jc,JY=Jc,JZ=Jc)
         dtype = {'names':names,'formats':formats}
-        halo_table = na.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
+        halo_table = np.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
         #convert position units  
         for name in names:
             halo_table[name]=halo_table[name]*conv.get(name,1)
@@ -1470,7 +1470,7 @@
                self.particle_fields["particle_position_y"] / self.period[1],
                self.particle_fields["particle_position_z"] / self.period[2],
                self.link)
-        self.densities = na.ones(self.tags.size, dtype='float64') * -1
+        self.densities = np.ones(self.tags.size, dtype='float64') * -1
         self.particle_fields["densities"] = self.densities
         self.particle_fields["tags"] = self.tags
 
@@ -1518,12 +1518,12 @@
             size = int(line[2])
             fnames = locations[halo]
             # Everything else
-            CoM = na.array([float(line[7]), float(line[8]), float(line[9])])
-            max_dens_point = na.array([float(line[3]), float(line[4]),
+            CoM = np.array([float(line[7]), float(line[8]), float(line[9])])
+            max_dens_point = np.array([float(line[3]), float(line[4]),
                 float(line[5]), float(line[6])])
             group_total_mass = float(line[1])
             max_radius = float(line[13])
-            bulk_vel = na.array([float(line[10]), float(line[11]),
+            bulk_vel = np.array([float(line[10]), float(line[11]),
                 float(line[12])])
             rms_vel = float(line[14])
             if len(line) == 15:
@@ -1541,7 +1541,7 @@
                 e1_vec0 = float(line[18])
                 e1_vec1 = float(line[19])
                 e1_vec2 = float(line[20])
-                e1_vec = na.array([e1_vec0, e1_vec1, e1_vec2])
+                e1_vec = np.array([e1_vec0, e1_vec1, e1_vec2])
                 tilt = float(line[21])
                 self._groups.append(LoadedHalo(self.pf, halo, size = size,
                     CoM = CoM,
@@ -1596,7 +1596,7 @@
             y = float(line[columns['y']])
             z = float(line[columns['z']])
             r = float(line[columns['r']])
-            cen = na.array([x, y, z])
+            cen = np.array([x, y, z])
             # Now we see if there's anything else.
             if extra:
                 temp_dict = {}
@@ -1631,7 +1631,7 @@
         self.rearrange = rearrange
         self.period = period
         self.old_period = period.copy()
-        self.period = na.array([1.] * 3)
+        self.period = np.array([1.] * 3)
         self._data_source = data_source
         self.premerge = premerge
         self.tree = tree
@@ -1645,20 +1645,20 @@
         if (self.particle_fields["particle_index"] < 0).any():
             mylog.error("Negative values in particle_index field. Parallel HOP will fail.")
             exit = True
-        if na.unique(self.particle_fields["particle_index"]).size != \
+        if np.unique(self.particle_fields["particle_index"]).size != \
                 self.particle_fields["particle_index"].size:
             mylog.error("Non-unique values in particle_index field. Parallel HOP will fail.")
             exit = True
 
         self.comm.mpi_exit_test(exit)
         # Try to do this in a memory conservative way.
-        na.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
+        np.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
             self.particle_fields['ParticleMassMsun'])
-        na.divide(self.particle_fields["particle_position_x"],
+        np.divide(self.particle_fields["particle_position_x"],
             self.old_period[0], self.particle_fields["particle_position_x"])
-        na.divide(self.particle_fields["particle_position_y"],
+        np.divide(self.particle_fields["particle_position_y"],
             self.old_period[1], self.particle_fields["particle_position_y"])
-        na.divide(self.particle_fields["particle_position_z"],
+        np.divide(self.particle_fields["particle_position_z"],
             self.old_period[2], self.particle_fields["particle_position_z"])
         obj = ParallelHOPHaloFinder(self.period, self.padding,
             self.num_neighbors, self.bounds,
@@ -1688,20 +1688,20 @@
         self.period = self.old_period.copy()
         # Precompute the bulk velocity in parallel.
         yt_counters("Precomp bulk vel.")
-        self.bulk_vel = na.zeros((self.group_count, 3), dtype='float64')
+        self.bulk_vel = np.zeros((self.group_count, 3), dtype='float64')
         yt_counters("bulk vel. reading data")
         pm = obj.mass
         # Fix this back to un-normalized units.
-        na.multiply(pm, self.total_mass, pm)
+        np.multiply(pm, self.total_mass, pm)
         xv = self._data_source["particle_velocity_x"][self._base_indices]
         yv = self._data_source["particle_velocity_y"][self._base_indices]
         zv = self._data_source["particle_velocity_z"][self._base_indices]
         yt_counters("bulk vel. reading data")
         yt_counters("bulk vel. computing")
         select = (self.tags >= 0)
-        calc = len(na.where(select == True)[0])
+        calc = len(np.where(select == True)[0])
         if calc:
-            vel = na.empty((calc, 3), dtype='float64')
+            vel = np.empty((calc, 3), dtype='float64')
             ms = pm[select]
             vel[:, 0] = xv[select] * ms
             vel[:, 1] = yv[select] * ms
@@ -1710,13 +1710,13 @@
             sort = subchain.argsort()
             vel = vel[sort]
             sort_subchain = subchain[sort]
-            uniq_subchain = na.unique(sort_subchain)
-            diff_subchain = na.ediff1d(sort_subchain)
+            uniq_subchain = np.unique(sort_subchain)
+            diff_subchain = np.ediff1d(sort_subchain)
             marks = (diff_subchain > 0)
-            marks = na.arange(calc)[marks] + 1
-            marks = na.concatenate(([0], marks, [calc]))
+            marks = np.arange(calc)[marks] + 1
+            marks = np.concatenate(([0], marks, [calc]))
             for i, u in enumerate(uniq_subchain):
-                self.bulk_vel[u] = na.sum(vel[marks[i]:marks[i + 1]], axis=0)
+                self.bulk_vel[u] = np.sum(vel[marks[i]:marks[i + 1]], axis=0)
             del vel, subchain, sort_subchain
             del diff_subchain
         # Bring it together, and divide by the previously computed total mass
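
The reduction above is a sort-based segmented sum: sort the per-particle
values by group label, locate the boundaries where the label changes with
np.ediff1d, and sum each contiguous run. A small self-contained sketch:

    import numpy as np

    labels = np.array([2, 0, 1, 0, 2, 1])      # group id per particle
    vals = np.array([0., 1., 2., 3., 4., 5.])  # quantity to reduce
    sort = labels.argsort()
    s_labels, s_vals = labels[sort], vals[sort]
    marks = np.arange(labels.size - 1)[np.ediff1d(s_labels) > 0] + 1
    marks = np.concatenate(([0], marks, [labels.size]))
    sums = np.array([s_vals[a:b].sum()
                     for a, b in zip(marks[:-1], marks[1:])])
    # sums per sorted unique label -> [4., 7., 4.]
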
@@ -1729,27 +1729,27 @@
         # Now calculate the RMS velocity of the groups in parallel, very
         # similarly to the bulk velocity and re-using some of the arrays.
         yt_counters("rms vel computing")
-        rms_vel_temp = na.zeros((self.group_count, 2), dtype='float64')
+        rms_vel_temp = np.zeros((self.group_count, 2), dtype='float64')
         if calc:
-            vel = na.empty((calc, 3), dtype='float64')
+            vel = np.empty((calc, 3), dtype='float64')
             vel[:, 0] = xv[select] * ms
             vel[:, 1] = yv[select] * ms
             vel[:, 2] = zv[select] * ms
             vel = vel[sort]
             for i, u in enumerate(uniq_subchain):
                 # This finds the sum locally.
-                rms_vel_temp[u][0] = na.sum(((vel[marks[i]:marks[i + 1]] - \
+                rms_vel_temp[u][0] = np.sum(((vel[marks[i]:marks[i + 1]] - \
                     self.bulk_vel[u]) / self.Tot_M[u]) ** 2.)
                 # I could use self.group_sizes...
                 rms_vel_temp[u][1] = marks[i + 1] - marks[i]
             del vel, marks, uniq_subchain
         # Bring it together.
         rms_vel_temp = self.comm.mpi_allreduce(rms_vel_temp, op='sum')
-        self.rms_vel = na.empty(self.group_count, dtype='float64')
+        self.rms_vel = np.empty(self.group_count, dtype='float64')
         for groupID in xrange(self.group_count):
             # Here we do the Mean and the Root.
             self.rms_vel[groupID] = \
-                na.sqrt(rms_vel_temp[groupID][0] / rms_vel_temp[groupID][1]) * \
+                np.sqrt(rms_vel_temp[groupID][0] / rms_vel_temp[groupID][1]) * \
                 self.group_sizes[groupID]
         del rms_vel_temp
         yt_counters("rms vel computing")
@@ -1764,16 +1764,16 @@
         """
         Each task will make an entry for all groups, but it may be empty.
         """
-        unique_ids = na.unique(self.tags)
-        counts = na.bincount((self.tags + 1).tolist())
-        sort_indices = na.argsort(self.tags)
-        grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
+        unique_ids = np.unique(self.tags)
+        counts = np.bincount((self.tags + 1).tolist())
+        sort_indices = np.argsort(self.tags)
+        grab_indices = np.indices(self.tags.shape).ravel()[sort_indices]
         del sort_indices
         cp = 0
         index = 0
         # We want arrays for parallel HOP
-        self._groups = na.empty(self.group_count, dtype='object')
-        self._max_dens = na.empty((self.group_count, 4), dtype='float64')
+        self._groups = np.empty(self.group_count, dtype='object')
+        self._max_dens = np.empty((self.group_count, 4), dtype='float64')
         if self.group_count == 0:
             mylog.info("There are no halos found.")
             return
@@ -1861,7 +1861,7 @@
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.hierarchy = pf.h
-        self.center = (na.array(ds.right_edge) + na.array(ds.left_edge)) / 2.0
+        self.center = (np.array(ds.right_edge) + np.array(ds.left_edge)) / 2.0
 
     def _parse_halolist(self, threshold_adjustment):
         groups = []
@@ -1871,7 +1871,7 @@
         for halo in self._groups:
             this_max_dens = halo.maximum_density_location()
             # if the most dense particle is in the box, keep it
-            if na.all((this_max_dens >= LE) & (this_max_dens <= RE)):
+            if np.all((this_max_dens >= LE) & (this_max_dens <= RE)):
                 # Now we add the halo information to OURSELVES, taken from the
                 # self.hop_list
                 # We need to mock up the HOPHaloList thingie, so we need to
@@ -2128,8 +2128,8 @@
         >>> halos = parallelHF(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self._data_source = pf.h.all_data()
         GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
             padding=0.0)
@@ -2141,7 +2141,7 @@
         if self.tree != 'F' and self.tree != 'C':
             mylog.error("No kD Tree specified!")
         period = pf.domain_right_edge - pf.domain_left_edge
-        topbounds = na.array([[0., 0., 0.], period])
+        topbounds = np.array([[0., 0., 0.], period])
         # Cut up the volume evenly initially, with no padding.
         padded, LE, RE, self._data_source = \
             self.partition_hierarchy_3d(ds=self._data_source,
@@ -2190,14 +2190,14 @@
             # approximation, but it's OK with the safety factor
             padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
                 avg_spacing
-            self.padding = (na.ones(3, dtype='float64') * padding,
-                na.ones(3, dtype='float64') * padding)
+            self.padding = (np.ones(3, dtype='float64') * padding,
+                np.ones(3, dtype='float64') * padding)
             mylog.info('padding %s avg_spacing %f vol %f local_parts %d' % \
                 (str(self.padding), avg_spacing, vol, num_particles))
         # Another approach to padding, perhaps more accurate.
         elif fancy_padding and self._distributed:
-            LE_padding = na.empty(3, dtype='float64')
-            RE_padding = na.empty(3, dtype='float64')
+            LE_padding = np.empty(3, dtype='float64')
+            RE_padding = np.empty(3, dtype='float64')
             avg_spacing = (float(vol) / data.size) ** (1. / 3.)
             base_padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
                 avg_spacing
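
The padding heuristic above is dimensional analysis: with num_particles in a volume vol, the average interparticle spacing is (vol / num_particles)**(1/3), and a sphere expected to contain num_neighbors particles has a radius of roughly num_neighbors**(1/3) spacings, inflated by the safety factor. A quick numerical check with made-up values:

    import numpy as np

    vol = 1.0                  # subvolume size, code units
    num_particles = 128 ** 3
    num_neighbors = 65
    safety = 1.5               # illustrative safety factor

    avg_spacing = (float(vol) / num_particles) ** (1. / 3.)
    padding = num_neighbors ** (1. / 3.) * safety * avg_spacing
    pad = (np.ones(3, dtype='float64') * padding,
           np.ones(3, dtype='float64') * padding)
    print(padding)             # ~0.047 with these numbers
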
@@ -2215,9 +2215,9 @@
                     self._data_source.left_edge[(dim + 2) % 3])
                 bin_width = base_padding
                 num_bins = int(math.ceil(width / bin_width))
-                bins = na.arange(num_bins + 1, dtype='float64') * bin_width + \
+                bins = np.arange(num_bins + 1, dtype='float64') * bin_width + \
                     self._data_source.left_edge[dim]
-                counts, bins = na.histogram(data, bins)
+                counts, bins = np.histogram(data, bins)
                 # left side.
                 start = 0
                 count = counts[0]
@@ -2250,8 +2250,8 @@
             total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(),
                                                  op='sum')
         if not self._distributed:
-            self.padding = (na.zeros(3, dtype='float64'),
-                na.zeros(3, dtype='float64'))
+            self.padding = (np.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'))
         # If we're using a subvolume, we now re-divide.
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
@@ -2282,8 +2282,8 @@
         n_random = int(adjust * float(random_points) / self.comm.size)
         mylog.info("Reading in %d random particles." % n_random)
         # Get unique random particles.
-        my_points = na.empty((n_random, 3), dtype='float64')
-        uni = na.array(random.sample(xrange(xp.size), n_random))
+        my_points = np.empty((n_random, 3), dtype='float64')
+        uni = np.array(random.sample(xrange(xp.size), n_random))
         uni = uni[uni.argsort()]
         my_points[:, 0] = xp[uni]
         del xp
@@ -2297,10 +2297,10 @@
         mine, sizes = self.comm.mpi_info_dict(n_random)
         if mine == 0:
             tot_random = sum(sizes.values())
-            root_points = na.empty((tot_random, 3), dtype='float64')
+            root_points = np.empty((tot_random, 3), dtype='float64')
             root_points.shape = (1, 3 * tot_random)
         else:
-            root_points = na.empty([])
+            root_points = np.empty([])
         my_points.shape = (1, n_random * 3)
         root_points = self.comm.par_combine_object(my_points[0],
                 datatype="array", op="cat")
@@ -2315,9 +2315,9 @@
         num_bins = 1000
         width = bounds[1][dim] - bounds[0][dim]
         bin_width = width / num_bins
-        bins = na.arange(num_bins + 1, dtype='float64') * bin_width + \
+        bins = np.arange(num_bins + 1, dtype='float64') * bin_width + \
             bounds[0][dim]
-        counts, bins = na.histogram(points[:, dim], bins)
+        counts, bins = np.histogram(points[:, dim], bins)
         # Find the bin that passes the cut points.
         midpoints = [bounds[0][dim]]
         sum = 0
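
The bisection step being ported here histograms a random sample of positions along one axis, then walks the cumulative counts to place cut points that divide the particles roughly evenly. Stripped of the MPI machinery, the idea is (made-up data, fixed bin count):

    import numpy as np

    rng = np.random.RandomState(42)
    points = rng.random_sample(10000)      # positions along one axis in [0, 1)
    n_cuts = 4                             # want four roughly equal slabs

    bins = np.arange(1001, dtype='float64') / 1000.0
    counts, bins = np.histogram(points, bins)
    target = points.size / n_cuts

    midpoints, running = [0.0], 0
    for i, c in enumerate(counts):
        running += c
        if running >= target:
            midpoints.append(bins[i + 1])  # right edge of the crossing bin
            running = 0
    midpoints[-1] = 1.0                    # close the final slab at the boundary

    select = np.bitwise_and(points >= midpoints[0], points < midpoints[1])
    print(select.sum())                    # ~2500 points land in the first slab
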
@@ -2341,7 +2341,7 @@
         subpoints = []
         subbounds = []
         for pair in zip(midpoints[:-1], midpoints[1:]):
-            select = na.bitwise_and(points[:, dim] >= pair[0],
+            select = np.bitwise_and(points[:, dim] >= pair[0],
                 points[:, dim] < pair[1])
             subpoints.append(points[select])
             nb = bounds.copy()
@@ -2363,7 +2363,7 @@
         ms = -self.Tot_M.copy()
         del self.Tot_M
         Cx = self.CoM[:, 0].copy()
-        sorted = na.lexsort([Cx, ms])
+        sorted = np.lexsort([Cx, ms])
         del Cx, ms
         self._groups = self._groups[sorted]
         self._max_dens = self._max_dens[sorted]
@@ -2426,8 +2426,8 @@
         >>> halos = HaloFinder(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self.period = pf.domain_right_edge - pf.domain_left_edge
         self._data_source = pf.h.all_data()
         GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
@@ -2520,8 +2520,8 @@
         >>> halos = FOFHaloFinder(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self.period = pf.domain_right_edge - pf.domain_left_edge
         self.pf = pf
         self.hierarchy = pf.h
@@ -2544,7 +2544,7 @@
             avg_spacing = (float(vol) / n_parts) ** (1. / 3.)
             linking_length = link * avg_spacing
         else:
-            linking_length = na.abs(link)
+            linking_length = np.abs(link)
         self.padding = padding
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -25,13 +25,13 @@
 
 from collections import defaultdict
 import itertools, sys
-import numpy as na
+import numpy as np
 import gc
 
 from yt.funcs import *
 from yt.utilities.performance_counters import yt_counters, time_function
 try:
-    from yt.utilities.kdtree import \
+    from yt.utilities.kdtree.api import \
         chainHOP_tags_dens, \
         create_tree, fKD, find_nn_nearest_neighbors, \
         free_tree, find_chunk_nearest_neighbors
@@ -88,23 +88,23 @@
         for taskID in global_bounds:
             thisLE, thisRE = global_bounds[taskID]
             if self.mine != taskID:
-                vertices.append(na.array([thisLE[0], thisLE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisLE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisRE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisLE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisRE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisLE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisRE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisRE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisLE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisLE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisRE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisLE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisRE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisLE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisRE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisRE[1], thisRE[2], taskID]))
             if self.mine == taskID:
-                my_vertices.append(na.array([thisLE[0], thisLE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisLE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisRE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisLE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisRE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisLE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisRE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisRE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisLE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisLE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisRE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisLE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisRE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisLE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisRE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisRE[1], thisRE[2]]))
         # Find the neighbors we share corners with. Yes, this is lazy with
         # a double loop, but it works and this is definitely not a performance
         # bottleneck.
@@ -119,13 +119,13 @@
                 # Also test to see if the distance to this corner is within
                 # max_padding, which is more likely the case with load-balancing
                 # turned on.
-                dx = min( na.fabs(my_vertex[0] - vertex[0]), \
-                    self.period[0] - na.fabs(my_vertex[0] - vertex[0]))
-                dy = min( na.fabs(my_vertex[1] - vertex[1]), \
-                    self.period[1] - na.fabs(my_vertex[1] - vertex[1]))
-                dz = min( na.fabs(my_vertex[2] - vertex[2]), \
-                    self.period[2] - na.fabs(my_vertex[2] - vertex[2]))
-                d = na.sqrt(dx*dx + dy*dy + dz*dz)
+                dx = min( np.fabs(my_vertex[0] - vertex[0]), \
+                    self.period[0] - np.fabs(my_vertex[0] - vertex[0]))
+                dy = min( np.fabs(my_vertex[1] - vertex[1]), \
+                    self.period[1] - np.fabs(my_vertex[1] - vertex[1]))
+                dz = min( np.fabs(my_vertex[2] - vertex[2]), \
+                    self.period[2] - np.fabs(my_vertex[2] - vertex[2]))
+                d = np.sqrt(dx*dx + dy*dy + dz*dz)
                 if d <= self.max_padding:
                     self.neighbors.add(int(vertex[3]))
         # Faces and edges.
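
The dx/dy/dz construction just above is the minimum-image distance on a periodic box: along each axis, take the smaller of the direct separation and the wrap-around separation, then combine. As a standalone helper (a sketch, not a yt API):

    import numpy as np

    def periodic_distance(a, b, period):
        # Minimum-image Euclidean distance in a periodic box.
        d = np.fabs(np.asarray(a) - np.asarray(b))
        d = np.minimum(d, np.asarray(period) - d)
        return np.sqrt((d * d).sum(axis=-1))

    period = np.array([1.0, 1.0, 1.0])
    # Points near opposite faces are close through the boundary:
    print(periodic_distance([0.05, 0.5, 0.5], [0.95, 0.5, 0.5], period))  # 0.1
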
@@ -219,13 +219,13 @@
         annulus data.
         """
         if round == 'first':
-            max_pad = na.max(self.padding)
+            max_pad = np.max(self.padding)
             self.mine, self.global_padding = self.comm.mpi_info_dict(max_pad)
             self.max_padding = max(self.global_padding.itervalues())
         elif round == 'second':
             self.max_padding = 0.
             for neighbor in self.neighbors:
-                self.max_padding = na.maximum(self.global_padding[neighbor], \
+                self.max_padding = np.maximum(self.global_padding[neighbor], \
                     self.max_padding)
 
     def _communicate_padding_data(self):
@@ -247,7 +247,7 @@
         # This will reduce the size of the loop over particles.
         yt_counters("Picking padding data to send.")
         send_count = self.is_inside_annulus.sum()
-        points = na.empty((send_count, 3), dtype='float64')
+        points = np.empty((send_count, 3), dtype='float64')
         points[:,0] = self.xpos[self.is_inside_annulus]
         points[:,1] = self.ypos[self.is_inside_annulus]
         points[:,2] = self.zpos[self.is_inside_annulus]
@@ -280,9 +280,9 @@
         recv_size = 0
         for opp_neighbor in self.neighbors:
             opp_size = global_send_count[opp_neighbor][self.mine]
-            recv_real_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            recv_points[opp_neighbor] = na.empty((opp_size, 3), dtype='float64')
-            recv_mass[opp_neighbor] = na.empty(opp_size, dtype='float64')
+            recv_real_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            recv_points[opp_neighbor] = np.empty((opp_size, 3), dtype='float64')
+            recv_mass[opp_neighbor] = np.empty(opp_size, dtype='float64')
             recv_size += opp_size
         yt_counters("Initalizing recv arrays.")
         # Set up the receiving slots.
@@ -306,11 +306,11 @@
         yt_counters("Processing padded data.")
         del send_real_indices, send_points, send_mass
         # Now we add the data to ourselves.
-        self.index_pad = na.empty(recv_size, dtype='int64')
-        self.xpos_pad = na.empty(recv_size, dtype='float64')
-        self.ypos_pad = na.empty(recv_size, dtype='float64')
-        self.zpos_pad = na.empty(recv_size, dtype='float64')
-        self.mass_pad = na.empty(recv_size, dtype='float64')
+        self.index_pad = np.empty(recv_size, dtype='int64')
+        self.xpos_pad = np.empty(recv_size, dtype='float64')
+        self.ypos_pad = np.empty(recv_size, dtype='float64')
+        self.zpos_pad = np.empty(recv_size, dtype='float64')
+        self.mass_pad = np.empty(recv_size, dtype='float64')
         so_far = 0
         for opp_neighbor in self.neighbors:
             opp_size = global_send_count[opp_neighbor][self.mine]
@@ -335,7 +335,7 @@
         yt_counters("Flipping coordinates around the periodic boundary.")
         self.size = self.index.size + self.index_pad.size
         # Now that we have the full size, initialize the chainID array
-        self.chainID = na.ones(self.size,dtype='int64') * -1
+        self.chainID = np.ones(self.size,dtype='int64') * -1
         # Clean up explicitly, but these should be empty dicts by now.
         del recv_real_indices, hooks, recv_points, recv_mass
         yt_counters("Communicate discriminated padding")
@@ -348,10 +348,10 @@
         if self.tree == 'F':
             # Yes, we really do need to initialize this many arrays.
             # They're deleted in _parallelHOP.
-            fKD.dens = na.zeros(self.size, dtype='float64', order='F')
-            fKD.mass = na.concatenate((self.mass, self.mass_pad))
+            fKD.dens = np.zeros(self.size, dtype='float64', order='F')
+            fKD.mass = np.concatenate((self.mass, self.mass_pad))
             del self.mass
-            fKD.pos = na.empty((3, self.size), dtype='float64', order='F')
+            fKD.pos = np.empty((3, self.size), dtype='float64', order='F')
             # This actually copies the data into the fortran space.
             self.psize = self.xpos.size
             fKD.pos[0, :self.psize] = self.xpos
@@ -364,7 +364,7 @@
             fKD.pos[2, self.psize:] = self.zpos_pad
             del self.xpos_pad, self.ypos_pad, self.zpos_pad
             gc.collect()
-            fKD.qv = na.asfortranarray(na.empty(3, dtype='float64'))
+            fKD.qv = np.asfortranarray(np.empty(3, dtype='float64'))
             fKD.nn = self.num_neighbors
             # Plus 2 because we're looking for that neighbor, but only keeping 
             # nMerge + 1 neighbor tags, skipping ourselves.
@@ -375,8 +375,8 @@
             # Now call the fortran.
             create_tree(0)
         elif self.tree == 'C':
-            self.mass = na.concatenate((self.mass, self.mass_pad))
-            self.pos = na.empty((self.size, 3), dtype='float64')
+            self.mass = np.concatenate((self.mass, self.mass_pad))
+            self.pos = np.empty((self.size, 3), dtype='float64')
             self.psize = self.xpos.size
             self.pos[:self.psize, 0] = self.xpos
             self.pos[:self.psize, 1] = self.ypos
@@ -407,7 +407,7 @@
         # Test to see if the points are in the 'real' region
         (LE, RE) = self.bounds
         if round == 'first':
-            points = na.empty((self.real_size, 3), dtype='float64')
+            points = np.empty((self.real_size, 3), dtype='float64')
             points[:,0] = self.xpos
             points[:,1] = self.ypos
             points[:,2] = self.zpos
@@ -426,21 +426,21 @@
         temp_LE = LE + self.max_padding
         temp_RE = RE - self.max_padding
         if round == 'first':
-            inner = na.invert( (points >= temp_LE).all(axis=1) * \
+            inner = np.invert( (points >= temp_LE).all(axis=1) * \
                 (points < temp_RE).all(axis=1) )
         elif round == 'second' or round == 'third':
             if self.tree == 'F':
-                inner = na.invert( (fKD.pos.T >= temp_LE).all(axis=1) * \
+                inner = np.invert( (fKD.pos.T >= temp_LE).all(axis=1) * \
                     (fKD.pos.T < temp_RE).all(axis=1) )
             elif self.tree == 'C':
-                inner = na.invert( (self.pos >= temp_LE).all(axis=1) * \
+                inner = np.invert( (self.pos >= temp_LE).all(axis=1) * \
                     (self.pos < temp_RE).all(axis=1) )
         if round == 'first':
             del points
         # After inverting the logic above, we want points that are both
         # inside the real region and within one padding of the boundary,
         # and this selection does exactly that.
-        self.is_inside_annulus = na.bitwise_and(self.is_inside, inner)
+        self.is_inside_annulus = np.bitwise_and(self.is_inside, inner)
         del inner
         # Below we make a mapping of real particle index->local ID
         # Unfortunately this has to be a dict, because any task can have
         # particles of any particle_index, which means that if it were an
         # array, every task would probably end up with an array as long
         # as the full number of particles.
         if round == 'third':
-            temp = na.arange(self.size)
-            my_part = na.bitwise_or(na.invert(self.is_inside), self.is_inside_annulus)
-            my_part = na.bitwise_and(my_part, (self.chainID != -1))
-            catted_indices = na.concatenate(
+            temp = np.arange(self.size)
+            my_part = np.bitwise_or(np.invert(self.is_inside), self.is_inside_annulus)
+            my_part = np.bitwise_and(my_part, (self.chainID != -1))
+            catted_indices = np.concatenate(
                 (self.index, self.index_pad))[my_part]
             self.rev_index = dict.fromkeys(catted_indices)
             self.rev_index.update(itertools.izip(catted_indices, temp[my_part]))
@@ -468,11 +468,11 @@
         keeping all of this data, just using it.
         """
         yt_counters("densestNN")
-        self.densestNN = na.empty(self.size,dtype='int64')
+        self.densestNN = np.empty(self.size,dtype='int64')
         # We find nearest neighbors in chunks.
         chunksize = 10000
         if self.tree == 'F':
-            fKD.chunk_tags = na.asfortranarray(na.empty((self.num_neighbors, chunksize), dtype='int64'))
+            fKD.chunk_tags = np.asfortranarray(np.empty((self.num_neighbors, chunksize), dtype='int64'))
             start = 1 # Fortran counting!
             finish = 0
             while finish < self.size:
@@ -486,8 +486,8 @@
                 chunk_NNtags = (fKD.chunk_tags[:,:finish-start+1] - 1).transpose()
                 # Find the densest nearest neighbors by referencing the already
                 # calculated density.
-                n_dens = na.take(self.density,chunk_NNtags)
-                max_loc = na.argmax(n_dens,axis=1)
+                n_dens = np.take(self.density,chunk_NNtags)
+                max_loc = np.argmax(n_dens,axis=1)
                 for i in xrange(finish - start + 1): # +1 for fortran counting.
                     j = start + i - 1 # -1 for fortran counting.
                     self.densestNN[j] = chunk_NNtags[i,max_loc[i]]
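
Stripped of the kD-tree bookkeeping, each pass of the chunked loop above takes an (n_chunk, n_neighbors) array of neighbor tags, looks up every neighbor's density with np.take, and keeps the tag of the densest neighbor per particle. A toy version with fake tags and densities (the original assigns row by row; fancy indexing does the same in one step):

    import numpy as np

    density = np.array([0.1, 5.0, 2.0, 0.3, 9.0])
    chunk_NNtags = np.array([[0, 1, 2],      # neighbor tags for 3 particles
                             [2, 3, 4],
                             [1, 4, 0]])

    n_dens = np.take(density, chunk_NNtags)  # densities of each neighbor
    max_loc = np.argmax(n_dens, axis=1)      # column of the densest neighbor
    densestNN = chunk_NNtags[np.arange(len(max_loc)), max_loc]
    print(densestNN)                         # [1 4 4]
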
@@ -502,9 +502,9 @@
                 # be as memory efficient - fragmenting?
                 chunk_NNtags = self.kdtree.find_chunk_nearest_neighbors(start, \
                     finish, num_neighbors=self.num_neighbors)
-                n_dens = na.take(self.density, chunk_NNtags)
-                max_loc = na.argmax(n_dens, axis=1)
-                max_loc = na.argmax(n_dens,axis=1)
+                n_dens = np.take(self.density, chunk_NNtags)
+                max_loc = np.argmax(n_dens, axis=1)
                 for i in xrange(finish - start):
                     j = start + i
                     self.densestNN[j] = chunk_NNtags[i,max_loc[i]]
@@ -520,8 +520,8 @@
         """
         yt_counters("build_chains")
         chainIDmax = 0
-        self.densest_in_chain = na.ones(10000, dtype='float64') * -1 # chainID->density, one to one
-        self.densest_in_chain_real_index = na.ones(10000, dtype='int64') * -1 # chainID->real_index, one to one
+        self.densest_in_chain = np.ones(10000, dtype='float64') * -1 # chainID->density, one to one
+        self.densest_in_chain_real_index = np.ones(10000, dtype='int64') * -1 # chainID->real_index, one to one
         for i in xrange(int(self.size)):
             # If it's already in a group, move on, or if this particle is
             # in the padding, move on because chains can only terminate in
@@ -536,7 +536,7 @@
             # in the next loop.
             if chainIDnew == chainIDmax:
                 chainIDmax += 1
-        self.padded_particles = na.array(self.padded_particles, dtype='int64')
+        self.padded_particles = np.array(self.padded_particles, dtype='int64')
         self.densest_in_chain = self.__clean_up_array(self.densest_in_chain)
         self.densest_in_chain_real_index = self.__clean_up_array(self.densest_in_chain_real_index)
         yt_counters("build_chains")
@@ -598,9 +598,9 @@
         yt_counters("preconnect_chains")
         yt_counters("local chain sorting.")
         sort = self.densest_in_chain.argsort()
-        sort = na.flipud(sort)
-        map = na.empty(sort.size,dtype='int64')
-        map[sort] = na.arange(sort.size)
+        sort = np.flipud(sort)
+        map = np.empty(sort.size,dtype='int64')
+        map[sort] = np.arange(sort.size)
         self.densest_in_chain = self.densest_in_chain[sort]
         self.densest_in_chain_real_index = self.densest_in_chain_real_index[sort]
         del sort
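
The three lines building `map` are the inverse-permutation idiom: after `sort` orders chains by descending density, `map[old_chainID]` yields the chain's new density-ranked ID, so chainIDs can be relabeled in one vectorized pass. In isolation (names follow the code above, including its shadowing of the builtin map):

    import numpy as np

    densest_in_chain = np.array([3.0, 9.0, 1.0, 7.0])

    sort = densest_in_chain.argsort()
    sort = np.flipud(sort)                # descending order: [1, 3, 0, 2]
    map = np.empty(sort.size, dtype='int64')
    map[sort] = np.arange(sort.size)      # invert the permutation

    print(densest_in_chain[sort])         # [9. 7. 3. 1.]
    print(map)                            # old ID -> new rank: [2 0 3 1]
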
@@ -626,8 +626,8 @@
         elif self.tree == 'F':
             # Plus 2 because we're looking for that neighbor, but only keeping 
             # nMerge + 1 neighbor tags, skipping ourselves.
-            fKD.dist = na.empty(self.nMerge+2, dtype='float64')
-            fKD.tags = na.empty(self.nMerge+2, dtype='int64')
+            fKD.dist = np.empty(self.nMerge+2, dtype='float64')
+            fKD.tags = np.empty(self.nMerge+2, dtype='int64')
             # We can change this here to make the searches faster.
             fKD.nn = self.nMerge + 2
             for i in xrange(self.size):
@@ -685,7 +685,7 @@
         # link is to itself. At that point we've found the densest chain
         # in this set of sets and we keep a record of that.
         yt_counters("preconnect pregrouping.")
-        final_chain_map = na.empty(max(self.chainID)+1, dtype='int64')
+        final_chain_map = np.empty(max(self.chainID)+1, dtype='int64')
         removed = 0
         for i in xrange(self.chainID.max()+1):
             j = chain_count - i - 1
@@ -701,9 +701,9 @@
                 self.chainID[i] = final_chain_map[self.chainID[i]]
         del final_chain_map
         # Now make the chainID assignments consecutive.
-        map = na.empty(self.densest_in_chain.size, dtype='int64')
-        dic_new = na.empty(chain_count - removed, dtype='float64')
-        dicri_new = na.empty(chain_count - removed, dtype='int64')
+        map = np.empty(self.densest_in_chain.size, dtype='int64')
+        dic_new = np.empty(chain_count - removed, dtype='float64')
+        dicri_new = np.empty(chain_count - removed, dtype='int64')
         new = 0
         for i,dic in enumerate(self.densest_in_chain):
             if dic > 0:
@@ -763,9 +763,9 @@
         mylog.info("Sorting chains...")
         yt_counters("global chain sorting.")
         sort = self.densest_in_chain.argsort()
-        sort = na.flipud(sort)
-        map = na.empty(sort.size,dtype='int64')
-        map[sort] =na.arange(sort.size)
+        sort = np.flipud(sort)
+        map = np.empty(sort.size,dtype='int64')
+        map[sort] = np.arange(sort.size)
         self.densest_in_chain = self.densest_in_chain[sort]
         self.densest_in_chain_real_index = self.densest_in_chain_real_index[sort]
         del sort
@@ -779,14 +779,14 @@
         mylog.info("Pre-linking chains 'by hand'...")
         yt_counters("global chain hand-linking.")
         # If there are no repeats, we can skip this mess entirely.
-        uniq = na.unique(self.densest_in_chain_real_index)
+        uniq = np.unique(self.densest_in_chain_real_index)
         if uniq.size != self.densest_in_chain_real_index.size:
             # Find only the real particle indices that are repeated to reduce
             # the dict workload below.
             dicri = self.densest_in_chain_real_index[self.densest_in_chain_real_index.argsort()]
-            diff = na.ediff1d(dicri)
+            diff = np.ediff1d(dicri)
             diff = (diff == 0) # Picks out the places where the ids are equal
-            diff = na.concatenate((diff, [False])) # Makes it the same length
+            diff = np.concatenate((diff, [False])) # Makes it the same length
             # This has only the repeated IDs. Sets are faster at searches than
             # arrays.
             dicri = set(dicri[diff])
@@ -837,11 +837,11 @@
         for opp_neighbor in self.neighbors:
             opp_size = self.global_padded_count[opp_neighbor]
             to_recv_count += opp_size
-            temp_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            temp_chainIDs[opp_neighbor] = na.empty(opp_size, dtype='int64')
+            temp_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            temp_chainIDs[opp_neighbor] = np.empty(opp_size, dtype='int64')
         # The arrays we'll actually keep around...
-        self.recv_real_indices = na.empty(to_recv_count, dtype='int64')
-        self.recv_chainIDs = na.empty(to_recv_count, dtype='int64')
+        self.recv_real_indices = np.empty(to_recv_count, dtype='int64')
+        self.recv_chainIDs = np.empty(to_recv_count, dtype='int64')
         # Set up the receives, but don't actually use them.
         hooks = []
         for opp_neighbor in self.neighbors:
@@ -899,9 +899,9 @@
         """
         yt_counters("connect_chains_across_tasks")
         # Remote (lower dens) chain -> local (higher) chain.
-        chainID_translate_map_local = na.arange(self.nchains, dtype='int64')
+        chainID_translate_map_local = np.arange(self.nchains, dtype='int64')
         # Build the stuff to send.
-        self.uphill_real_indices = na.concatenate((
+        self.uphill_real_indices = np.concatenate((
             self.index, self.index_pad))[self.padded_particles]
         self.uphill_chainIDs = self.chainID[self.padded_particles]
         del self.padded_particles
@@ -991,7 +991,7 @@
         """
         yt_counters("communicate_annulus_chainIDs")
         # Pick the particles in the annulus.
-        real_indices = na.concatenate(
+        real_indices = np.concatenate(
             (self.index, self.index_pad))[self.is_inside_annulus]
         chainIDs = self.chainID[self.is_inside_annulus]
         # We're done with this here.
@@ -1012,8 +1012,8 @@
         recv_chainIDs = dict.fromkeys(self.neighbors)
         for opp_neighbor in self.neighbors:
             opp_size = global_annulus_count[opp_neighbor]
-            recv_real_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            recv_chainIDs[opp_neighbor] = na.empty(opp_size, dtype='int64')
+            recv_real_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            recv_chainIDs[opp_neighbor] = np.empty(opp_size, dtype='int64')
         # Set up the receiving hooks.
         hooks = []
         for opp_neighbor in self.neighbors:
@@ -1062,8 +1062,8 @@
         # Plus 2 because we're looking for that neighbor, but only keeping 
         # nMerge + 1 neighbor tags, skipping ourselves.
         if self.tree == 'F':
-            fKD.dist = na.empty(self.nMerge+2, dtype='float64')
-            fKD.tags = na.empty(self.nMerge+2, dtype='int64')
+            fKD.dist = np.empty(self.nMerge+2, dtype='float64')
+            fKD.tags = np.empty(self.nMerge+2, dtype='int64')
             # We can change this here to make the searches faster.
             fKD.nn = self.nMerge+2
         elif self.tree == 'C':
@@ -1160,9 +1160,9 @@
                 top_keys.append(top_key)
                 bot_keys.append(bot_key)
                 vals.append(data[top_key][bot_key])
-        top_keys = na.array(top_keys, dtype='int64')
-        bot_keys = na.array(bot_keys, dtype='int64')
-        vals = na.array(vals, dtype='float64')
+        top_keys = np.array(top_keys, dtype='int64')
+        bot_keys = np.array(bot_keys, dtype='int64')
+        vals = np.array(vals, dtype='float64')
 
         data.clear()
 
@@ -1179,14 +1179,14 @@
         # We need to find out which pairs of self.top_keys, self.bot_keys are
         # both < self.peakthresh, and create arrays that will store this
         # relationship.
-        both = na.bitwise_and((self.densest_in_chain[self.top_keys] < self.peakthresh),
+        both = np.bitwise_and((self.densest_in_chain[self.top_keys] < self.peakthresh),
             (self.densest_in_chain[self.bot_keys] < self.peakthresh))
         g_high = self.top_keys[both]
         g_low = self.bot_keys[both]
         g_dens = self.vals[both]
         del both
-        self.reverse_map = na.ones(self.densest_in_chain.size) * -1
-        densestbound = na.ones(self.densest_in_chain.size) * -1.0
+        self.reverse_map = np.ones(self.densest_in_chain.size) * -1
+        densestbound = np.ones(self.densest_in_chain.size) * -1.0
         for i, gl in enumerate(g_low):
             if g_dens[i] > densestbound[gl]:
                 densestbound[gl] = g_dens[i]
@@ -1200,7 +1200,7 @@
             if self.densest_in_chain[chainID] >= self.peakthresh:
                 self.reverse_map[chainID] = groupID
                 groupID += 1
-        group_equivalancy_map = na.empty(groupID, dtype='object')
+        group_equivalancy_map = np.empty(groupID, dtype='object')
         for i in xrange(groupID):
             group_equivalancy_map[i] = set([])
         # Loop over all of the chain linkages.
@@ -1259,7 +1259,7 @@
         # Shack.'
         Set_list = []
         # We only want the groupIDs that belong to this task: groupID % size == mine.
-        keys = na.arange(groupID, dtype='int64')
+        keys = np.arange(groupID, dtype='int64')
         size = self.comm.size
         select = (keys % size == self.mine)
         groupIDs = keys[select]
@@ -1298,7 +1298,7 @@
         del group_equivalancy_map, final_set, keys, select, groupIDs, current_sets
         del mine_groupIDs, not_mine_groupIDs, new_set, to_add_set, liter
         # Convert this list of sets into a look-up table
-        lookup = na.ones(self.densest_in_chain.size, dtype='int64') * (self.densest_in_chain.size + 2)
+        lookup = np.ones(self.densest_in_chain.size, dtype='int64') * (self.densest_in_chain.size + 2)
         for i,item in enumerate(Set_list):
             item_min = min(item)
             for groupID in item:
@@ -1353,7 +1353,7 @@
             # There are no groups, probably.
             pass
         # Make a secondary map to make the IDs consecutive.
-        values = na.arange(len(temp))
+        values = np.arange(len(temp))
         secondary_map = dict(itertools.izip(temp, values))
         del values
         # Update reverse_map
@@ -1386,8 +1386,8 @@
                 self.chainID[i] = -1
         del self.is_inside
         # Create a densest_in_group, analogous to densest_in_chain.
-        keys = na.arange(group_count)
-        vals = na.zeros(group_count)
+        keys = np.arange(group_count)
+        vals = np.zeros(group_count)
         self.densest_in_group = dict(itertools.izip(keys,vals))
         self.densest_in_group_real_index = self.densest_in_group.copy()
         del keys, vals
@@ -1409,12 +1409,12 @@
         velocity, to save time in HaloFinding.py (fewer barriers!).
         """
         select = (self.chainID != -1)
-        calc = len(na.where(select == True)[0])
-        loc = na.empty((calc, 3), dtype='float64')
+        calc = len(np.where(select == True)[0])
+        loc = np.empty((calc, 3), dtype='float64')
         if self.tree == 'F':
-            loc[:, 0] = na.concatenate((self.xpos, self.xpos_pad))[select]
-            loc[:, 1] = na.concatenate((self.ypos, self.ypos_pad))[select]
-            loc[:, 2] = na.concatenate((self.zpos, self.zpos_pad))[select]
+            loc[:, 0] = np.concatenate((self.xpos, self.xpos_pad))[select]
+            loc[:, 1] = np.concatenate((self.ypos, self.ypos_pad))[select]
+            loc[:, 2] = np.concatenate((self.zpos, self.zpos_pad))[select]
             self.__max_memory()
             del self.xpos_pad, self.ypos_pad, self.zpos_pad
         elif self.tree == 'C':
@@ -1424,15 +1424,15 @@
         # I think this will be faster than several vector operations that need
         # to pull the entire chainID array out of memory several times.
         yt_counters("max dens point")
-        max_dens_point = na.zeros((self.group_count,4),dtype='float64')
-        for i,part in enumerate(na.arange(self.size)[select]):
+        max_dens_point = np.zeros((self.group_count,4),dtype='float64')
+        for i,part in enumerate(np.arange(self.size)[select]):
             groupID = self.chainID[part]
             if part < self.real_size:
                 real_index = self.index[part]
             else:
                 real_index = self.index_pad[part - self.real_size]
             if real_index == self.densest_in_group_real_index[groupID]:
-                max_dens_point[groupID] = na.array([self.density[part], \
+                max_dens_point[groupID] = np.array([self.density[part], \
                 loc[i, 0], loc[i, 1], loc[i, 2]])
         del self.index, self.index_pad, self.densest_in_group_real_index
         # Now we broadcast this, effectively, with an allsum. Even though
@@ -1443,25 +1443,25 @@
         yt_counters("max dens point")
         # Now CoM.
         yt_counters("CoM")
-        CoM_M = na.zeros((self.group_count,3),dtype='float64')
-        Tot_M = na.zeros(self.group_count, dtype='float64')
-        #c_vec = self.max_dens_point[:,1:4][subchain] - na.array([0.5,0.5,0.5])
+        CoM_M = np.zeros((self.group_count,3),dtype='float64')
+        Tot_M = np.zeros(self.group_count, dtype='float64')
+        #c_vec = self.max_dens_point[:,1:4][subchain] - np.array([0.5,0.5,0.5])
         if calc:
-            c_vec = self.max_dens_point[:,1:4][subchain] - na.array([0.5,0.5,0.5])
-            size = na.bincount(self.chainID[select]).astype('int64')
+            c_vec = self.max_dens_point[:,1:4][subchain] - np.array([0.5,0.5,0.5])
+            size = np.bincount(self.chainID[select]).astype('int64')
         else:
             # This task has no particles in groups!
-            size = na.zeros(self.group_count, dtype='int64')
+            size = np.zeros(self.group_count, dtype='int64')
         # In case this task doesn't have all the groups, add trailing zeros.
         if size.size != self.group_count:
-            size = na.concatenate((size, na.zeros(self.group_count - size.size, dtype='int64')))
+            size = np.concatenate((size, np.zeros(self.group_count - size.size, dtype='int64')))
         if calc:
             cc = loc - c_vec
-            cc = cc - na.floor(cc)
-            ms = na.concatenate((self.mass, self.mass_pad))[select]
+            cc = cc - np.floor(cc)
+            ms = np.concatenate((self.mass, self.mass_pad))[select]
             # Most of the time, the masses will be all the same, and we can try
             # to save some effort.
-            ms_u = na.unique(ms)
+            ms_u = np.unique(ms)
             if ms_u.size == 1:
                 single = True
                 Tot_M = size.astype('float64') * ms_u
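
The `cc - np.floor(cc)` step above is what keeps the center of mass correct across the periodic boundary: each group's particles are re-centered so the densest point sits at 0.5, wrapped into [0, 1), averaged, and shifted back. A one-group illustration on a unit box:

    import numpy as np

    # A group straddling the x = 0 / x = 1 face of a unit periodic box.
    loc = np.array([[0.95, 0.5, 0.5],
                    [0.05, 0.5, 0.5]])
    max_dens_point = np.array([0.95, 0.5, 0.5])

    c_vec = max_dens_point - np.array([0.5, 0.5, 0.5])
    cc = loc - c_vec
    cc = cc - np.floor(cc)          # wrap: both points now sit near 0.5
    com = cc.mean(axis=0) + c_vec   # shift back into box coordinates
    com = com - np.floor(com)       # re-wrap the final position
    print(com)                      # [0. 0.5 0.5], not the naive [0.5 0.5 0.5]
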
@@ -1475,13 +1475,13 @@
             sort = subchain.argsort()
             cc = cc[sort]
             sort_subchain = subchain[sort]
-            uniq_subchain = na.unique(sort_subchain)
-            diff_subchain = na.ediff1d(sort_subchain)
+            uniq_subchain = np.unique(sort_subchain)
+            diff_subchain = np.ediff1d(sort_subchain)
             marks = (diff_subchain > 0)
-            marks = na.arange(calc)[marks] + 1
-            marks = na.concatenate(([0], marks, [calc]))
+            marks = np.arange(calc)[marks] + 1
+            marks = np.concatenate(([0], marks, [calc]))
             for i, u in enumerate(uniq_subchain):
-                CoM_M[u] = na.sum(cc[marks[i]:marks[i+1]], axis=0)
+                CoM_M[u] = np.sum(cc[marks[i]:marks[i+1]], axis=0)
             if not single:
                 for i,groupID in enumerate(subchain):
                     Tot_M[groupID] += ms[i]
@@ -1490,31 +1490,31 @@
                 # Don't divide by zero.
                 if groupID in self.I_own:
                     CoM_M[groupID] /= Tot_M[groupID]
-                    CoM_M[groupID] += self.max_dens_point[groupID,1:4] - na.array([0.5,0.5,0.5])
+                    CoM_M[groupID] += self.max_dens_point[groupID,1:4] - np.array([0.5,0.5,0.5])
                     CoM_M[groupID] *= Tot_M[groupID]
         # Now we find their global values
         self.group_sizes = self.comm.mpi_allreduce(size, op='sum')
         CoM_M = self.comm.mpi_allreduce(CoM_M, op='sum')
         self.Tot_M = self.comm.mpi_allreduce(Tot_M, op='sum')
-        self.CoM = na.empty((self.group_count,3), dtype='float64')
+        self.CoM = np.empty((self.group_count,3), dtype='float64')
         for groupID in xrange(int(self.group_count)):
             self.CoM[groupID] = CoM_M[groupID] / self.Tot_M[groupID]
         yt_counters("CoM")
         self.__max_memory()
         # Now we find the maximum radius for all groups.
         yt_counters("max radius")
-        max_radius = na.zeros(self.group_count, dtype='float64')
+        max_radius = np.zeros(self.group_count, dtype='float64')
         if calc:
             com = self.CoM[subchain]
-            rad = na.fabs(com - loc)
-            dist = (na.minimum(rad, self.period - rad)**2.).sum(axis=1)
+            rad = np.fabs(com - loc)
+            dist = (np.minimum(rad, self.period - rad)**2.).sum(axis=1)
             dist = dist[sort]
             for i, u in enumerate(uniq_subchain):
-                max_radius[u] = na.max(dist[marks[i]:marks[i+1]])
+                max_radius[u] = np.max(dist[marks[i]:marks[i+1]])
         # Find the maximum across all tasks.
         mylog.info('Fraction of particles in this region in groups: %f' % (float(calc)/self.size))
         self.max_radius = self.comm.mpi_allreduce(max_radius, op='max')
-        self.max_radius = na.sqrt(self.max_radius)
+        self.max_radius = np.sqrt(self.max_radius)
         yt_counters("max radius")
         yt_counters("Precomp.")
         self.__max_memory()
@@ -1558,7 +1558,7 @@
         chain_count = self._build_chains()
         # This array tracks whether or not relationships for this particle
         # need to be examined twice, in preconnect_chains and in connect_chains
-        self.search_again = na.ones(self.size, dtype='bool')
+        self.search_again = np.ones(self.size, dtype='bool')
         if self.premerge:
             chain_count = self._preconnect_chains(chain_count)
         mylog.info('Globally assigning chainIDs...')
@@ -1625,7 +1625,7 @@
         try:
             arr[key] = value
         except IndexError:
-            arr = na.concatenate((arr, na.ones(10000, dtype=type)*-1))
+            arr = np.concatenate((arr, np.ones(10000, dtype=type)*-1))
             arr[key] = value
         return arr
     


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import math, time
 
 from yt.funcs import *
@@ -186,7 +186,7 @@
         f = open(self.halo_file,'r')
         line = f.readline()
         if line == "":
-            self.haloes = na.array([])
+            self.haloes = np.array([])
             return
         while line[0] == '#':
             line = f.readline()
@@ -198,16 +198,16 @@
                 self.haloes.append(float(line[self.mass_column]))
             line = f.readline()
         f.close()
-        self.haloes = na.array(self.haloes)
+        self.haloes = np.array(self.haloes)
 
     def bin_haloes(self):
         """
         With the list of virial masses, find the halo mass function.
         """
-        bins = na.logspace(self.log_mass_min,
+        bins = np.logspace(self.log_mass_min,
             self.log_mass_max,self.num_sigma_bins)
         avgs = (bins[1:]+bins[:-1])/2.
-        dis, bins = na.histogram(self.haloes,bins)
+        dis, bins = np.histogram(self.haloes,bins)
         # add right to left
         for i,b in enumerate(dis):
             dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
@@ -246,13 +246,13 @@
 
         # output arrays
         # 1) radius -- the spatial scale corresponding to each mass bin (Mpc/h)
-        self.Rarray = na.empty(self.num_sigma_bins,dtype='float64')
+        self.Rarray = np.empty(self.num_sigma_bins,dtype='float64')
         # 2) log10 of mass (Msolar, NOT Msolar/h)
-        self.logmassarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.logmassarray = np.empty(self.num_sigma_bins, dtype='float64')
         # 3) mass (Msolar/h)
-        self.massarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.massarray = np.empty(self.num_sigma_bins, dtype='float64')
         # 4) sigma(M, z=0, where mass is in Msun/h)
-        self.sigmaarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.sigmaarray = np.empty(self.num_sigma_bins, dtype='float64')
 
         # get sigma_8 normalization
         R = 8.0;  # in units of Mpc/h (comoving)
@@ -305,9 +305,9 @@
         
         # output arrays
         # 5) (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
-        self.dn_M_z = na.empty(self.num_sigma_bins, dtype='float64')
+        self.dn_M_z = np.empty(self.num_sigma_bins, dtype='float64')
         # 6) cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
-        self.nofmz_cum = na.zeros(self.num_sigma_bins, dtype='float64')
+        self.nofmz_cum = np.zeros(self.num_sigma_bins, dtype='float64')
         
         for j in xrange(self.num_sigma_bins - 1):
             i = (self.num_sigma_bins - 2) - j
@@ -360,7 +360,7 @@
 
         Rcom = self.R;  # this is R in comoving Mpc/h
 
-        f = k*k*self.PofK(k)*na.power( abs(self.WofK(Rcom,k)), 2.0);
+        f = k*k*self.PofK(k)*np.power( abs(self.WofK(Rcom,k)), 2.0);
 
         return f
 
@@ -369,7 +369,7 @@
         /* returns power spectrum as a function of wavenumber k */
         """
 
-        thisPofK = na.power(k, self.primordial_index) * na.power( self.TofK(k), 2.0);
+        thisPofK = np.power(k, self.primordial_index) * np.power( self.TofK(k), 2.0);
 
         return thisPofK;
 
@@ -389,7 +389,7 @@
 
         x = R*k;
 
-        thisWofK = 3.0 * ( na.sin(x) - x*na.cos(x) ) / (x*x*x);
+        thisWofK = 3.0 * ( np.sin(x) - x*np.cos(x) ) / (x*x*x);
 
         return thisWofK;
 
@@ -660,22 +660,22 @@
         self.y_freestream = 17.2*self.f_hdm*(1+0.488*math.pow(self.f_hdm,-7.0/6.0))* \
             SQR(self.num_degen_hdm*self.qq/self.f_hdm);
         temp1 = math.pow(self.growth_k0, 1.0-self.p_cb);
-        temp2 = na.power(self.growth_k0/(1+self.y_freestream),0.7);
-        self.growth_cb = na.power(1.0+temp2, self.p_cb/0.7)*temp1;
-        self.growth_cbnu = na.power(na.power(self.f_cb,0.7/self.p_cb)+temp2, self.p_cb/0.7)*temp1;
+        temp2 = np.power(self.growth_k0/(1+self.y_freestream),0.7);
+        self.growth_cb = np.power(1.0+temp2, self.p_cb/0.7)*temp1;
+        self.growth_cbnu = np.power(np.power(self.f_cb,0.7/self.p_cb)+temp2, self.p_cb/0.7)*temp1;
     
         # Compute the master function
         self.gamma_eff = self.omhh*(self.alpha_gamma+(1-self.alpha_gamma)/ \
             (1+SQR(SQR(kk*self.sound_horizon_fit*0.43))));
         self.qq_eff = self.qq*self.omhh/self.gamma_eff;
     
-        tf_sup_L = na.log(2.71828+1.84*self.beta_c*self.alpha_gamma*self.qq_eff);
-        tf_sup_C = 14.4+325/(1+60.5*na.power(self.qq_eff,1.11));
+        tf_sup_L = np.log(2.71828+1.84*self.beta_c*self.alpha_gamma*self.qq_eff);
+        tf_sup_C = 14.4+325/(1+60.5*np.power(self.qq_eff,1.11));
         self.tf_sup = tf_sup_L/(tf_sup_L+tf_sup_C*SQR(self.qq_eff));
     
         self.qq_nu = 3.92*self.qq*math.sqrt(self.num_degen_hdm/self.f_hdm);
         self.max_fs_correction = 1+1.2*math.pow(self.f_hdm,0.64)*math.pow(self.num_degen_hdm,0.3+0.6*self.f_hdm)/ \
-            (na.power(self.qq_nu,-1.6)+na.power(self.qq_nu,0.8));
+            (np.power(self.qq_nu,-1.6)+np.power(self.qq_nu,0.8));
         self.tf_master = self.tf_sup*self.max_fs_correction;
     
         # Now compute the CDM+HDM+baryon transfer functions
@@ -707,21 +707,21 @@
     changes by less than *error*. Hopefully someday we can do something
     better than this!
     """
-    xvals = na.logspace(0,na.log10(initial_guess), initial_guess+1)-.9
+    xvals = np.logspace(0,np.log10(initial_guess), initial_guess+1)-.9
     yvals = fcn(xvals)
     xdiffs = xvals[1:] - xvals[:-1]
-    # Trapezoid rule, but with different dxes between values, so na.trapz
+    # Trapezoid rule, but with different dxes between values, so np.trapz
     # will not work.
     areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-    area0 = na.sum(areas)
+    area0 = np.sum(areas)
     # Next guess.
     next_guess = 10 * initial_guess
-    xvals = na.logspace(0,na.log10(next_guess), 2*initial_guess**2+1)-.99
+    xvals = np.logspace(0,np.log10(next_guess), 2*initial_guess**2+1)-.99
     yvals = fcn(xvals)
     xdiffs = xvals[1:] - xvals[:-1]
     # Trapezoid rule.
     areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-    area1 = na.sum(areas)
+    area1 = np.sum(areas)
     # Now we refine until the error is smaller than *error*.
     diff = area1 - area0
     area_final = area1
@@ -729,12 +729,12 @@
     one_pow = 3
     while diff > error:
         next_guess *= 10
-        xvals = na.logspace(0,na.log10(next_guess), one_pow*initial_guess**one_pow+1) - (1 - 0.1**one_pow)
+        xvals = np.logspace(0,np.log10(next_guess), one_pow*initial_guess**one_pow+1) - (1 - 0.1**one_pow)
         yvals = fcn(xvals)
         xdiffs = xvals[1:] - xvals[:-1]
         # Trapezoid rule.
         areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-        area_next = na.sum(areas)
+        area_next = np.sum(areas)
         diff = area_next - area_last
         area_last = area_next
         one_pow+=1
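
The refinement loop above keeps extending a log-spaced grid tenfold and re-applying the trapezoid rule until successive areas agree to within *error*. One note on the comment: np.trapz does accept unevenly spaced samples when the x locations are passed explicitly, so the hand-rolled sum is equivalent to np.trapz(yvals, xvals). A compact version of the same idea (function, grid, and tolerance are illustrative):

    import numpy as np

    def integrate_to_inf(fcn, error=1.0e-5, initial_guess=10):
        # Log-spaced trapezoid rule; push the upper limit out tenfold
        # (and refine the grid) until the integral stops changing.
        upper, n, area_last = float(initial_guess), 1000, None
        while True:
            xvals = np.logspace(-2, np.log10(upper), n)
            yvals = fcn(xvals)
            area = np.trapz(yvals, xvals)   # x handles the uneven spacing
            if area_last is not None and abs(area - area_last) < error:
                return area
            area_last, upper, n = area, upper * 10, n * 2

    print(integrate_to_inf(lambda x: np.exp(-x)))   # ~0.990, i.e. exp(-0.01)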


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -41,7 +41,7 @@
 # 8. Parentage is described by a fraction of particles that pass from one to
 #    the other; we have both descendant fractions and ancestry fractions.
 
-import numpy as na
+import numpy as np
 import h5py
 import time
 import pdb
@@ -119,7 +119,7 @@
             x,y,z = [float(f) for f in line.split(None, 3)[:-1]]
             hp.append([x,y,z])
         if hp != []:
-            self.halo_positions = na.array(hp)
+            self.halo_positions = np.array(hp)
             self.halo_kdtree = KDTree(self.halo_positions)
         else:
             self.halo_positions = None
@@ -158,7 +158,7 @@
 class HaloParticleList(object):
     def __init__(self, halo_id, position, particle_ids):
         self.halo_id = halo_id
-        self.position = na.array(position)
+        self.position = np.array(position)
         self.particle_ids = particle_ids
         self.number_of_particles = particle_ids.size
 
@@ -168,7 +168,7 @@
     def find_relative_parentage(self, child):
         # Return two values: percent this halo gave to the other, and percent
         # of the other that comes from this halo
-        overlap = na.intersect1d(self.particle_ids, child.particle_ids).size
+        overlap = np.intersect1d(self.particle_ids, child.particle_ids).size
         of_child_from_me = float(overlap)/child.particle_ids.size
         of_mine_from_me = float(overlap)/self.particle_ids.size
         return of_child_from_me, of_mine_from_me


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import os, glob, time, gc, md5, sys
 import h5py
 import types
@@ -37,10 +37,7 @@
 from yt.convenience import load
 from yt.utilities.logger import ytLogger as mylog
 import yt.utilities.pydot as pydot
-try:
-    from yt.utilities.kdtree import *
-except ImportError:
-    mylog.debug("The Fortran kD-Tree did not import correctly.")
+from yt.utilities.spatial import cKDTree
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelDummy, \
     ParallelAnalysisInterface, \
@@ -174,7 +171,7 @@
         """
         ParallelAnalysisInterface.__init__(self)
         self.restart_files = restart_files # list of enzo restart files
-        self.with_halos = na.ones(len(restart_files), dtype='bool')
+        self.with_halos = np.ones(len(restart_files), dtype='bool')
         self.database = database # the sqlite database of haloes.
         self.halo_finder_function = halo_finder_function # which halo finder to use
         self.halo_finder_threshold = halo_finder_threshold # overdensity threshold
@@ -349,16 +346,8 @@
                 child_points.append([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-            # Turn it into fortran.
-            child_points = na.array(child_points)
-            fKD.pos = na.asfortranarray(child_points.T)
-            fKD.qv = na.empty(3, dtype='float64')
-            fKD.dist = na.empty(NumNeighbors, dtype='float64')
-            fKD.tags = na.empty(NumNeighbors, dtype='int64')
-            fKD.nn = NumNeighbors
-            fKD.sort = True
-            fKD.rearrange = True
-            create_tree(0)
+            child_points = np.array(child_points)
+            kdtree = cKDTree(child_points, leafsize = 10)
     
         # Find the parent points from the database.
         parent_pf = load(parentfile)
@@ -373,22 +362,20 @@
             candidates = {}
             for row in self.cursor:
                 # Normalize positions for use within the kdtree.
-                fKD.qv = na.array([row[1] / self.period[0],
+                query = np.array([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-                find_nn_nearest_neighbors()
-                NNtags = fKD.tags[:] - 1
+                NNtags = kdtree.query(query, NumNeighbors, period=self.period)[1]
                 nIDs = []
                 for n in NNtags:
-                    nIDs.append(n)
+                    if n not in nIDs:
+                        nIDs.append(n)
                 # We need to fill in fake halos if there aren't enough halos,
                 # which can happen at high redshifts.
                 while len(nIDs) < NumNeighbors:
                     nIDs.append(-1)
                 candidates[row[0]] = nIDs
-            
-            del fKD.pos, fKD.tags, fKD.dist
-            free_tree(0) # Frees the kdtree object.
+            del kdtree
         else:
             candidates = None
 
@@ -400,7 +387,7 @@
         # The +1 is an extra element in the array that collects garbage
         # values. This allows us to eliminate a try/except later.
         # This extra array element will be cut off eventually.
-        self.child_mass_arr = na.zeros(len(candidates)*NumNeighbors + 1,
+        self.child_mass_arr = np.zeros(len(candidates)*NumNeighbors + 1,
             dtype='float64')
         # Records where to put the entries in the above array.
         self.child_mass_loc = defaultdict(dict)
@@ -450,9 +437,9 @@
             # the parent dataset.
             parent_names = list(self.names[parent_currt])
             parent_names.sort()
-            parent_IDs = na.array([], dtype='int64')
-            parent_masses = na.array([], dtype='float64')
-            parent_halos = na.array([], dtype='int32')
+            parent_IDs = []
+            parent_masses = []
+            parent_halos = []
             for i,pname in enumerate(parent_names):
                 if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                     h5fp = h5py.File(pname)
@@ -460,31 +447,38 @@
                         gID = int(group[4:])
                         thisIDs = h5fp[group]['particle_index'][:]
                         thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                        parent_IDs = na.concatenate((parent_IDs, thisIDs))
-                        parent_masses = na.concatenate((parent_masses, thisMasses))
-                        parent_halos = na.concatenate((parent_halos, 
-                            na.ones(thisIDs.size, dtype='int32') * gID))
+                        parent_IDs.append(thisIDs)
+                        parent_masses.append(thisMasses)
+                        parent_halos.append(np.ones(len(thisIDs),
+                            dtype='int32') * gID)
                         del thisIDs, thisMasses
                     h5fp.close()
-            
             # Sort the arrays by particle index in ascending order.
-            sort = parent_IDs.argsort()
-            parent_IDs = parent_IDs[sort]
-            parent_masses = parent_masses[sort]
-            parent_halos = parent_halos[sort]
-            del sort
+            if len(parent_IDs)==0:
+                parent_IDs = np.array([], dtype='int64')
+                parent_masses = np.array([], dtype='float64')
+                parent_halos = np.array([], dtype='int32')
+            else:
+                parent_IDs = np.concatenate(parent_IDs).astype('int64')
+                parent_masses = np.concatenate(parent_masses).astype('float64')
+                parent_halos = np.concatenate(parent_halos).astype('int32')
+                sort = parent_IDs.argsort()
+                parent_IDs = parent_IDs[sort]
+                parent_masses = parent_masses[sort]
+                parent_halos = parent_halos[sort]
+                del sort
         else:
             # We can use old data and save disk reading.
             (parent_IDs, parent_masses, parent_halos) = last
         # Used to communicate un-matched particles.
-        parent_send = na.ones(parent_IDs.size, dtype='bool')
-        
+        parent_send = np.ones(parent_IDs.size, dtype='bool')
+
         # Now get the child halo data.
         child_names = list(self.names[child_currt])
         child_names.sort()
-        child_IDs = na.array([], dtype='int64')
-        child_masses = na.array([], dtype='float64')
-        child_halos = na.array([], dtype='int32')
+        child_IDs = []
+        child_masses = []
+        child_halos = []
         for i,cname in enumerate(child_names):
             if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                 h5fp = h5py.File(cname)
@@ -492,20 +486,28 @@
                     gID = int(group[4:])
                     thisIDs = h5fp[group]['particle_index'][:]
                     thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                    child_IDs = na.concatenate((child_IDs, thisIDs))
-                    child_masses = na.concatenate((child_masses, thisMasses))
-                    child_halos = na.concatenate((child_halos, 
-                        na.ones(thisIDs.size, dtype='int32') * gID))
+                    child_IDs.append(thisIDs)
+                    child_masses.append(thisMasses)
+                    child_halos.append(np.ones(len(thisIDs),
+                        dtype='int32') * gID)
                     del thisIDs, thisMasses
                 h5fp.close()
+        # Sort the arrays by particle index in ascending order.
+        if len(child_IDs)==0:
+            child_IDs = np.array([], dtype='int64')
+            child_masses = np.array([], dtype='float64')
+            child_halos = np.array([], dtype='int32')
+        else:
+            child_IDs = np.concatenate(child_IDs).astype('int64')
+            child_masses = np.concatenate(child_masses)
+            child_halos = np.concatenate(child_halos)
+            sort = child_IDs.argsort()
+            child_IDs = child_IDs[sort]
+            child_masses = child_masses[sort]
+            child_halos = child_halos[sort]
+            del sort
         
-        # Sort the arrays by particle index.
-        sort = child_IDs.argsort()
-        child_IDs = child_IDs[sort]
-        child_masses = child_masses[sort]
-        child_halos = child_halos[sort]
-        child_send = na.ones(child_IDs.size, dtype='bool')
-        del sort
+        child_send = np.ones(child_IDs.size, dtype='bool')
         
         # Match particles in halos.
         self._match(parent_IDs, child_IDs, parent_halos, child_halos,
@@ -618,8 +620,8 @@
     def _match(self, parent_IDs, child_IDs, parent_halos, child_halos,
             parent_masses, parent_send = None, child_send = None):
         # Pick out IDs that are in both arrays.
-        parent_in_child = na.in1d(parent_IDs, child_IDs, assume_unique = True)
-        child_in_parent = na.in1d(child_IDs, parent_IDs, assume_unique = True)
+        parent_in_child = np.in1d(parent_IDs, child_IDs, assume_unique = True)
+        child_in_parent = np.in1d(child_IDs, parent_IDs, assume_unique = True)
         # Pare down the arrays to just matched particle IDs.
         parent_halos_cut = parent_halos[parent_in_child]
         child_halos_cut = child_halos[child_in_parent]


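The hunks above also change how per-file halo data is accumulated: instead of
calling np.concatenate inside the read loop, which copies the whole running
array on every file and makes the total cost quadratic in the number of files,
the arrays are collected in plain lists and concatenated once. A minimal
sketch of that pattern (hypothetical helper name; dtypes taken from the diff):

    import numpy as np

    def gather_and_sort(id_chunks, mass_chunks, halo_chunks):
        # Append per-file arrays to lists while reading, then concatenate
        # once; the empty-list guard mirrors the new branch in the diff.
        if len(id_chunks) == 0:
            ids = np.array([], dtype='int64')
            masses = np.array([], dtype='float64')
            halos = np.array([], dtype='int32')
        else:
            ids = np.concatenate(id_chunks).astype('int64')
            masses = np.concatenate(mass_chunks).astype('float64')
            halos = np.concatenate(halo_chunks).astype('int32')
            # Sort everything by particle index in ascending order.
            order = ids.argsort()
            ids, masses, halos = ids[order], masses[order], halos[order]
        return ids, masses, halos
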
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/halo_profiler/centering_methods.py
--- a/yt/analysis_modules/halo_profiler/centering_methods.py
+++ b/yt/analysis_modules/halo_profiler/centering_methods.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/halo_profiler/halo_filters.py
--- a/yt/analysis_modules/halo_profiler/halo_filters.py
+++ b/yt/analysis_modules/halo_profiler/halo_filters.py
@@ -24,7 +24,7 @@
 """
 
 from copy import deepcopy
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -105,11 +105,11 @@
 
     if use_log:
         for field in temp_profile.keys():
-            temp_profile[field] = na.log10(temp_profile[field])
+            temp_profile[field] = np.log10(temp_profile[field])
 
     virial = dict((field, 0.0) for field in fields)
 
-    if (not (na.array(overDensity) >= virial_overdensity).any()) and \
+    if (not (np.array(overDensity) >= virial_overdensity).any()) and \
             must_be_virialized:
         mylog.debug("This halo is not virialized!")
         return [False, {}]
@@ -123,7 +123,7 @@
     elif (overDensity[-1] >= virial_overdensity):
         index = -2
     else:
-        for q in (na.arange(len(overDensity),0,-1)-1):
+        for q in (np.arange(len(overDensity),0,-1)-1):
             if (overDensity[q] < virial_overdensity) and (overDensity[q-1] >= virial_overdensity):
                 index = q - 1
                 break
@@ -144,7 +144,7 @@
 
     if use_log:
         for field in virial.keys():
-            virial[field] = na.power(10, virial[field])
+            virial[field] = np.power(10, virial[field])
 
     for vfilter in virial_filters:
         if eval("%s %s %s" % (virial[vfilter[0]],vfilter[1],vfilter[2])):


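The virial filter above locates the pair of profile bins that straddles the
virial overdensity by scanning from the outermost bin inward. A stripped-down
sketch of that reverse scan (hypothetical function name; the module handles
the innermost and outermost bins before entering this loop, then interpolates
between the two bracketing bins):

    import numpy as np

    def find_virial_index(overdensity, virial_overdensity):
        # Walk from the outside in; return the index of the last bin that
        # is still at or above the threshold, or None if there is none.
        for q in (np.arange(len(overdensity), 0, -1) - 1):
            if overdensity[q] < virial_overdensity and \
                    overdensity[q - 1] >= virial_overdensity:
                return q - 1
        return None
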
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import os
 import h5py
 import types
@@ -605,6 +605,7 @@
 
         if newProfile:
             mylog.info("Writing halo %d" % halo['id'])
+            if os.path.exists(filename): os.remove(filename)
             if filename.endswith('.h5'):
                 profile.write_out_h5(filename)
             else:
@@ -682,7 +683,7 @@
                 max_val, maxi, mx, my, mz, mg = sphere.quantities['MaxLocation'](self.velocity_center[1])
                                                                                  
                 max_grid = self.pf.h.grids[mg]
-                max_cell = na.unravel_index(maxi, max_grid.ActiveDimensions)
+                max_cell = np.unravel_index(maxi, max_grid.ActiveDimensions)
                 sphere.set_field_parameter('bulk_velocity', [max_grid['x-velocity'][max_cell],
                                                              max_grid['y-velocity'][max_cell],
                                                              max_grid['z-velocity'][max_cell]])
@@ -715,7 +716,9 @@
             Default=True.
         njobs : int
             The number of jobs over which to split the projections.  Set
-            to -1 so that each halo is done by a single processor.
+            to -1 so that each halo is done by a single processor.  Halo 
+            projections do not currently work in parallel, so this must 
+            be set to -1.
             Default: -1.
         dynamic : bool
             If True, distribute halos using a task queue.  If False,
@@ -729,6 +732,12 @@
 
         """
 
+        # Halo projections cannot run in parallel because they are done by 
+        # giving a data source to the projection object.
+        if njobs > 0:
+            mylog.warn("Halo projections cannot use more than one processor per halo, setting njobs to -1.")
+            njobs = -1
+        
         # Get list of halos for projecting.
         if halo_list == 'filtered':
             halo_projection_list = self.filtered_halos
@@ -843,7 +852,7 @@
                               (self.projection_output_dir, halo['id'],
                                dataset_name, axis_labels[w])
                             if (frb[hp['field']] != 0).any():
-                                write_image(na.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
+                                write_image(np.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
                             else:
                                 mylog.info('Projection of %s for halo %d is all zeros, skipping image.' %
                                             (hp['field'], halo['id']))
@@ -1074,7 +1083,7 @@
                     profile[field].append(float(onLine[q]))
 
         for field in fields:
-            profile[field] = na.array(profile[field])
+            profile[field] = np.array(profile[field])
 
         profile_obj._data = profile
 
@@ -1169,7 +1178,7 @@
         for halo in self.filtered_halos:
             for halo_field in halo_fields:
                 if isinstance(halo[halo_field], types.ListType):
-                    field_data = na.array(halo[halo_field])
+                    field_data = np.array(halo[halo_field])
                     field_data.tofile(out_file, sep="\t", format=format)
                 else:
                     if halo_field == 'id':
@@ -1177,7 +1186,7 @@
                     else:
                         out_file.write("%s" % halo[halo_field])
                 out_file.write("\t")
-            field_data = na.array([halo[field] for field in fields])
+            field_data = np.array([halo[field] for field in fields])
             field_data.tofile(out_file, sep="\t", format=format)
             out_file.write("\n")
         out_file.close()
@@ -1205,7 +1214,7 @@
             value_list = []
             for halo in self.filtered_halos:
                 value_list.append(halo[halo_field])
-            value_list = na.array(value_list)
+            value_list = np.array(value_list)
             out_file.create_dataset(halo_field, data=value_list)
         out_file.close()
 
@@ -1213,7 +1222,7 @@
         fid = open(filename, "w")
         fields = [field for field in sorted(profile.keys()) if field != "UsedBins"]
         fid.write("\t".join(["#"] + fields + ["\n"]))
-        field_data = na.array([profile[field] for field in fields])
+        field_data = np.array([profile[field] for field in fields])
         for line in range(field_data.shape[1]):
             field_data[:, line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -1298,17 +1307,17 @@
         add2_y_weight_field = plot['weight_field'][plot['py'] - 0.5 * plot['pdy'] < 0]
 
         # Add the hanging cells back to the projection data.
-        plot.field_data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px,
+        plot.field_data['px'] = np.concatenate([plot['px'], add_x_px, add_y_px,
                                                 add2_x_px, add2_y_px])
-        plot.field_data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py,
+        plot.field_data['py'] = np.concatenate([plot['py'], add_x_py, add_y_py,
                                                 add2_x_py, add2_y_py])
-        plot.field_data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx,
+        plot.field_data['pdx'] = np.concatenate([plot['pdx'], add_x_pdx, add_y_pdx,
                                                  add2_x_pdx, add2_y_pdx])
-        plot.field_data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy,
+        plot.field_data['pdy'] = np.concatenate([plot['pdy'], add_x_pdy, add_y_pdy,
                                                  add2_x_pdy, add2_y_pdy])
-        plot.field_data[field] = na.concatenate([plot[field], add_x_field, add_y_field,
+        plot.field_data[field] = np.concatenate([plot[field], add_x_field, add_y_field,
                                                  add2_x_field, add2_y_field])
-        plot.field_data['weight_field'] = na.concatenate([plot['weight_field'],
+        plot.field_data['weight_field'] = np.concatenate([plot['weight_field'],
                                                           add_x_weight_field, add_y_weight_field,
                                                           add2_x_weight_field, add2_y_weight_field])
 


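Beyond the rename, one hunk above switches the bulk-velocity centering to
np.unravel_index, which maps the flat index returned by the MaxLocation
derived quantity back to an (i, j, k) cell in the grid. A toy illustration of
that mapping (random data, made-up shape):

    import numpy as np

    dims = (8, 8, 8)              # stand-in for grid.ActiveDimensions
    field = np.random.random(dims)
    maxi = field.argmax()         # a flat index, like MaxLocation returns
    max_cell = np.unravel_index(maxi, dims)
    assert field[max_cell] == field.max()
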
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
--- a/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
+++ b/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
@@ -24,7 +24,7 @@
 """
 
 import h5py, os.path
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.data_containers import YTFieldData
@@ -57,7 +57,7 @@
         self.Level = level
         self.LeftEdge = left_edge
         self.RightEdge = right_edge
-        self.start_index = na.min([grid.get_global_startindex() for grid in
+        self.start_index = np.min([grid.get_global_startindex() for grid in
                              base_pf.h.select_grids(level)], axis=0).astype('int64')
         self.dds = base_pf.h.select_grids(level)[0].dds.copy()
         dims = (self.RightEdge-self.LeftEdge)/self.dds
@@ -106,11 +106,11 @@
         self.pf = pf
         self.always_copy = always_copy
         self.min_level = min_level
-        self.int_offset = na.min([grid.get_global_startindex() for grid in
+        self.int_offset = np.min([grid.get_global_startindex() for grid in
                              pf.h.select_grids(min_level)], axis=0).astype('float64')
-        min_left = na.min([grid.LeftEdge for grid in
+        min_left = np.min([grid.LeftEdge for grid in
                            pf.h.select_grids(min_level)], axis=0).astype('float64')
-        max_right = na.max([grid.RightEdge for grid in 
+        max_right = np.max([grid.RightEdge for grid in 
                                    pf.h.select_grids(min_level)], axis=0).astype('float64')
         if offset is None: offset = (max_right + min_left)/2.0
         self.left_edge_offset = offset
@@ -151,7 +151,7 @@
         # Grid objects on this level...
         if grids is None: grids = self.pf.h.select_grids(level+self.min_level)
         level_node.attrs['delta'] = grids[0].dds*self.mult_factor
-        level_node.attrs['relativeRefinementFactor'] = na.array([2]*3, dtype='int32')
+        level_node.attrs['relativeRefinementFactor'] = np.array([2]*3, dtype='int32')
         level_node.attrs['numGrids'] = len(grids)
         for i,g in enumerate(grids):
             self.export_grid(afile, level_node, g, i, field)
@@ -169,8 +169,8 @@
         int_origin, lint, origin, dds = self._convert_grid(grid)
         grid_node.attrs['integerOrigin'] = int_origin
         grid_node.attrs['origin'] = origin
-        grid_node.attrs['ghostzoneFlags'] = na.zeros(6, dtype='int32')
-        grid_node.attrs['numGhostzones'] = na.zeros(3, dtype='int32')
+        grid_node.attrs['ghostzoneFlags'] = np.zeros(6, dtype='int32')
+        grid_node.attrs['numGhostzones'] = np.zeros(3, dtype='int32')
         grid_node.attrs['dims'] = grid.ActiveDimensions[::-1].astype('int32')
         if not self.always_copy and self.pf.h.data_style == 6 \
            and field in self.pf.h.field_list:
@@ -203,11 +203,11 @@
         # First we set up our translation between original and extracted
         self.data_style = data_style
         self.min_level = pf.min_level
-        self.int_offset = na.min([grid.get_global_startindex() for grid in
+        self.int_offset = np.min([grid.get_global_startindex() for grid in
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
-        min_left = na.min([grid.LeftEdge for grid in
+        min_left = np.min([grid.LeftEdge for grid in
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
-        max_right = na.max([grid.RightEdge for grid in 
+        max_right = np.max([grid.RightEdge for grid in 
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
         level_dx = pf.base_pf.h.select_grids(pf.min_level)[0].dds[0]
         dims = ((max_right-min_left)/level_dx)
@@ -247,12 +247,12 @@
         # Here we need to set up the grid info, which for the Enzo hierarchy
         # is done like:
         # self.grid_dimensions.flat[:] = ei
-        # self.grid_dimensions -= na.array(si, self.float_type)
+        # self.grid_dimensions -= np.array(si, self.float_type)
         # self.grid_dimensions += 1
         # self.grid_left_edge.flat[:] = LE
         # self.grid_right_edge.flat[:] = RE
         # self.grid_particle_count.flat[:] = np
-        # self.grids = na.array(self.grids, dtype='object')
+        # self.grids = np.array(self.grids, dtype='object')
         #
         # For now, we make the presupposition that all of our grids are
         # strictly nested and we are not doing any cuts.  However, we do
@@ -285,7 +285,7 @@
 
         self.grid_left_edge = self._convert_coords(self.grid_left_edge)
         self.grid_right_edge = self._convert_coords(self.grid_right_edge)
-        self.grids = na.array(grids, dtype='object')
+        self.grids = np.array(grids, dtype='object')
 
     def _fill_grid_arrays(self, grid, i):
         # This just fills in the grid arrays for a single grid --


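The hierarchy_subset changes are a straight na-to-np rename, but the
bookkeeping they touch is easy to state: the integer offset of the extracted
region is the minimum global start index over the level's grids, and a grid's
dimensions follow from its edges and cell width. A toy version with made-up
numbers:

    import numpy as np

    # Hypothetical global start indices for two grids on the minimum level.
    start_indices = np.array([[4, 4, 4], [8, 2, 6]], dtype='int64')
    int_offset = np.min(start_indices, axis=0).astype('float64')

    # Dimensions of a grid from its edges and cell width (toy values).
    left_edge = np.array([0.25, 0.25, 0.25])
    right_edge = np.array([0.75, 0.75, 0.75])
    dds = np.array([1.0 / 32.0] * 3)
    dims = ((right_edge - left_edge) / dds).astype('int64')  # -> [16 16 16]
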
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -22,7 +22,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import copy
 
 from yt.funcs import *
@@ -133,6 +133,7 @@
         else:
             exec(operation)
 
+        if self.children is None: return
         for child in self.children:
             child.pass_down(operation)
 


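The clump_handling hunk adds a guard so that pass_down stops at leaf clumps
whose children attribute is still None. A stripped-down sketch of the
recursion, with a plain callable standing in for the exec'd operation string:

    class Node(object):
        def __init__(self, children=None):
            # None means this clump was never subdivided; an empty list
            # would mean it was subdivided and produced no children.
            self.children = children

        def pass_down(self, operation):
            operation(self)
            if self.children is None:
                return  # the guard added in the hunk above
            for child in self.children:
                child.pass_down(operation)
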
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/level_sets/clump_tools.py
--- a/yt/analysis_modules/level_sets/clump_tools.py
+++ b/yt/analysis_modules/level_sets/clump_tools.py
@@ -23,8 +23,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
-nar = na.array
+import numpy as np
+nar = np.array
 
 counter = 0
 def recursive_all_clumps(clump,list,level,parentnumber):
@@ -89,7 +89,7 @@
     yt.visualization.plot_modifications.ClumpContourCallback"""
     minDensity = [c['Density'].min() for c in clump_list]
     
-    args = na.argsort(minDensity)
+    args = np.argsort(minDensity)
     list = nar(clump_list)[args]
     reverse = range(list.size-1,-1,-1)
     return list[reverse]


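The clump-sorting helper above orders clumps by their minimum density and then
reverses the result; the fancy index built from range(list.size-1,-1,-1) is
just [::-1]. A sketch of that ordering on plain inputs (hypothetical function
name):

    import numpy as np

    def sort_by_min_density(clump_list, min_density):
        # Ascending argsort by each clump's minimum density, then reverse
        # so the densest-floored clump comes first.
        args = np.argsort(min_density)
        ordered = np.array(clump_list, dtype='object')[args]
        return ordered[::-1]

    # sort_by_min_density(['a', 'b', 'c'], [3.0, 1.0, 2.0]) -> a, c, b
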
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -24,7 +24,7 @@
 """
 
 from itertools import chain
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import yt.utilities.data_point_utilities as data_point_utilities
@@ -63,12 +63,12 @@
     tr = []
     for k in joins.keys():
         v = joins.pop(k)
-        tr.append((k, na.array(list(v), dtype="int64")))
+        tr.append((k, np.array(list(v), dtype="int64")))
     return tr
 
 def identify_contours(data_source, field, min_val, max_val,
                           cached_fields=None):
-    cur_max_id = na.sum([g.ActiveDimensions.prod() for g in data_source._grids])
+    cur_max_id = np.sum([g.ActiveDimensions.prod() for g in data_source._grids])
     pbar = get_pbar("First pass", len(data_source._grids))
     grids = sorted(data_source._grids, key=lambda g: -g.Level)
     total_contours = 0
@@ -76,27 +76,27 @@
     for gi,grid in enumerate(grids):
         pbar.update(gi+1)
         cm = data_source._get_cut_mask(grid)
-        if cm is True: cm = na.ones(grid.ActiveDimensions, dtype='bool')
+        if cm is True: cm = np.ones(grid.ActiveDimensions, dtype='bool')
         old_field_parameters = grid.field_parameters
         grid.field_parameters = data_source.field_parameters
-        local_ind = na.where( (grid[field] > min_val)
+        local_ind = np.where( (grid[field] > min_val)
                             & (grid[field] < max_val) & cm )
         grid.field_parameters = old_field_parameters
         if local_ind[0].size == 0: continue
-        kk = na.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
-        grid["tempContours"] = na.ones(grid.ActiveDimensions, dtype='int64') * -1
+        kk = np.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
+        grid["tempContours"] = np.ones(grid.ActiveDimensions, dtype='int64') * -1
         grid["tempContours"][local_ind] = kk[:]
         cur_max_id -= local_ind[0].size
-        xi_u,yi_u,zi_u = na.where(grid["tempContours"] > -1)
-        cor_order = na.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
+        xi_u,yi_u,zi_u = np.where(grid["tempContours"] > -1)
+        cor_order = np.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
         fd_orig = grid["tempContours"].copy()
         xi = xi_u[cor_order]
         yi = yi_u[cor_order]
         zi = zi_u[cor_order]
         while data_point_utilities.FindContours(grid["tempContours"], xi, yi, zi) < 0:
             pass
-        total_contours += na.unique(grid["tempContours"][grid["tempContours"] > -1]).size
-        new_contours = na.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
+        total_contours += np.unique(grid["tempContours"][grid["tempContours"] > -1]).size
+        new_contours = np.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
         tree += zip(new_contours, new_contours)
     tree = set(tree)
     pbar.finish()
@@ -110,10 +110,10 @@
         boundary_tree = amr_utils.construct_boundary_relationships(fd)
         tree.update(((a, b) for a, b in boundary_tree))
     pbar.finish()
-    sort_new = na.array(list(tree), dtype='int64')
+    sort_new = np.array(list(tree), dtype='int64')
     mylog.info("Coalescing %s joins", sort_new.shape[0])
     joins = coalesce_join_tree(sort_new)
-    #joins = [(i, na.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
+    #joins = [(i, np.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
     pbar = get_pbar("Joining ", len(joins))
     # This process could and should be done faster
     print "Joining..."
@@ -136,9 +136,9 @@
     data_source.get_data("tempContours")
     contour_ind = {}
     i = 0
-    for contour_id in na.unique(data_source["tempContours"]):
+    for contour_id in np.unique(data_source["tempContours"]):
         if contour_id == -1: continue
-        contour_ind[i] = na.where(data_source["tempContours"] == contour_id)
+        contour_ind[i] = np.where(data_source["tempContours"] == contour_id)
         mylog.debug("Contour id %s has %s cells", i, contour_ind[i][0].size)
         i += 1
     mylog.info("Identified %s contours between %0.5e and %0.5e",


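The first pass in identify_contours flags every cell inside the
(min_val, max_val) band and hands each one a provisional contour id, counting
down from the total cell count so ids never collide across grids. A toy
version of that id assignment (random field, made-up bounds):

    import numpy as np

    grid_field = np.random.random((8, 8, 8))
    min_val, max_val = 0.2, 0.8
    cur_max_id = grid_field.size

    local_ind = np.where((grid_field > min_val) & (grid_field < max_val))
    kk = np.arange(cur_max_id, cur_max_id - local_ind[0].size, -1)
    temp_contours = np.ones(grid_field.shape, dtype='int64') * -1
    temp_contours[local_ind] = kk
    cur_max_id -= local_ind[0].size  # the next grid draws from the remainder
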
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/radial_column_density/radial_column_density.py
--- a/yt/analysis_modules/radial_column_density/radial_column_density.py
+++ b/yt/analysis_modules/radial_column_density/radial_column_density.py
@@ -105,14 +105,14 @@
         """
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
-        self.center = na.asarray(center)
+        self.center = np.asarray(center)
         self.max_radius = max_radius
         self.steps = steps
         self.base = base
         self.Nside = Nside
         self.ang_divs = ang_divs
-        self.real_ang_divs = int(na.abs(ang_divs))
-        self.phi, self.theta = na.mgrid[0.0:2*na.pi:ang_divs, 0:na.pi:ang_divs]
+        self.real_ang_divs = int(np.abs(ang_divs))
+        self.phi, self.theta = np.mgrid[0.0:2*np.pi:ang_divs, 0:np.pi:ang_divs]
         self.phi1d = self.phi[:,0]
         self.theta1d = self.theta[0,:]
         self.dphi = self.phi1d[1] - self.phi1d[0]
@@ -135,20 +135,20 @@
         # but this will work for now.
         right = self.pf.domain_right_edge - self.center
         left = self.center - self.pf.domain_left_edge
-        min_r = na.min(right)
-        min_l = na.min(left)
-        self.max_radius = na.min([self.max_radius, min_r, min_l])
+        min_r = np.min(right)
+        min_l = np.min(left)
+        self.max_radius = np.min([self.max_radius, min_r, min_l])
     
     def _make_bins(self):
         # We'll make the bins start from the smallest cell size to the
         # specified radius. Column density inside the same cell as our 
         # center is kind of ill-defined, anyway.
         if self.base == 'lin':
-            self.bins = na.linspace(self.pf.h.get_smallest_dx(), self.max_radius,
+            self.bins = np.linspace(self.pf.h.get_smallest_dx(), self.max_radius,
                 self.steps)
         elif self.base == 'log':
-            self.bins = na.logspace(na.log10(self.pf.h.get_smallest_dx()),
-                na.log10(self.max_radius), self.steps)
+            self.bins = np.logspace(np.log10(self.pf.h.get_smallest_dx()),
+                np.log10(self.max_radius), self.steps)
     
     def _build_surfaces(self, field):
         # This will be index by bin index.
@@ -172,17 +172,17 @@
             Values of zero are found outside the maximum radius and
             in the cell of the user-specified center point.
             This setting is useful if the field is going to be logged
-            (e.g. na.log10) where zeros are inconvenient.
+            (e.g. np.log10) where zeros are inconvenient.
             Default = None
         """
         x = data['x']
         sh = x.shape
-        ad = na.prod(sh)
+        ad = np.prod(sh)
         if type(data) == type(FieldDetector()):
-            return na.ones(sh)
+            return np.ones(sh)
         y = data['y']
         z = data['z']
-        pos = na.array([x.reshape(ad), y.reshape(ad), z.reshape(ad)]).T
+        pos = np.array([x.reshape(ad), y.reshape(ad), z.reshape(ad)]).T
         del x, y, z
         vals = self._interpolate_value(pos)
         del pos
@@ -199,25 +199,25 @@
         # according to the points angle.
         # 1. Find the angle from the center point to the position.
         vec = pos - self.center
-        phi = na.arctan2(vec[:, 1], vec[:, 0])
+        phi = np.arctan2(vec[:, 1], vec[:, 0])
         # Convert the convention from [-pi, pi) to [0, 2pi).
         sel = (phi < 0)
-        phi[sel] += 2 * na.pi
+        phi[sel] += 2 * np.pi
         # Find the radius.
-        r = na.sqrt(na.sum(vec * vec, axis = 1))
+        r = np.sqrt(np.sum(vec * vec, axis = 1))
         # Keep track of the points outside of self.max_radius, which we'll
         # handle separately before we return.
         outside = (r > self.max_radius)
-        theta = na.arccos(vec[:, 2] / r)
+        theta = np.arccos(vec[:, 2] / r)
         # 2. Find the bin for this position.
-        digi = na.digitize(r, self.bins)
+        digi = np.digitize(r, self.bins)
         # Find the values on the inner and outer surfaces.
-        in_val = na.zeros_like(r)
-        out_val = na.zeros_like(r)
+        in_val = np.zeros_like(r)
+        out_val = np.zeros_like(r)
         # These two will be used for interpolation.
-        in_r = na.zeros_like(r)
-        out_r = na.zeros_like(r)
-        for bin in na.unique(digi):
+        in_r = np.zeros_like(r)
+        out_r = np.zeros_like(r)
+        for bin in np.unique(digi):
             sel = (digi == bin)
             # Special case if we're outside the largest sphere.
             if bin == len(self.bins):
@@ -229,7 +229,7 @@
                 continue
             # Special case if we're inside the smallest sphere.
             elif bin == 0:
-                in_val[sel] = na.zeros_like(phi[sel])
+                in_val[sel] = np.zeros_like(phi[sel])
                 in_r[sel] = 0.
                 out_val[sel] = self._interpolate_surface_value(1,
                     phi[sel], theta[sel])
@@ -244,11 +244,11 @@
                     phi[sel], theta[sel])
                 out_r[sel] = self.bins[bin]
         # Interpolate using a linear fit in column density / r space.
-        val = na.empty_like(r)
+        val = np.empty_like(r)
         # Special case for inside smallest sphere.
         sel = (digi == 0)
         val[sel] = (1. - (out_r[sel] - r[sel]) / out_r[sel]) * out_val[sel]
-        na.invert(sel, sel) # In-place operation!
+        np.invert(sel, sel) # In-place operation!
         val[sel] = (out_val[sel] - in_val[sel]) / (out_r[sel] - in_r[sel]) * \
             (r[sel] - in_r[sel]) + in_val[sel]
         # Fix the things to zero that should be zero.
@@ -259,8 +259,8 @@
         # Given a surface bin and an angle, interpolate the value on
         # that surface to the angle.
         # 1. Find the four values closest to the angle.
-        phi_bin = na.digitize(phi, self.phi1d)
-        theta_bin = na.digitize(theta, self.theta1d)
+        phi_bin = np.digitize(phi, self.phi1d)
+        theta_bin = np.digitize(theta, self.theta1d)
         val00 = self.surfaces[bin][phi_bin - 1, theta_bin - 1]
         val01 = self.surfaces[bin][phi_bin - 1, theta_bin]
         val10 = self.surfaces[bin][phi_bin, theta_bin - 1]


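The interpolation step above fits the value linearly in r between the
bracketing inner and outer shells, with a special case for points inside the
smallest sphere, where the inner radius is zero. A sketch of just that
arithmetic on plain arrays (hypothetical function name):

    import numpy as np

    def interpolate_shells(r, in_r, out_r, in_val, out_val):
        val = np.empty_like(r)
        # Inside the smallest sphere: ramp from zero at the center up to
        # the value on the first surface.
        sel = (in_r == 0)
        val[sel] = (1. - (out_r[sel] - r[sel]) / out_r[sel]) * out_val[sel]
        np.invert(sel, sel)  # in-place negation, as in the module
        val[sel] = (out_val[sel] - in_val[sel]) / (out_r[sel] - in_r[sel]) * \
            (r[sel] - in_r[sel]) + in_val[sel]
        return val
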
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -47,18 +47,18 @@
 
         self.bounds = bounds
         self.ev_bounds = ev_bounds
-        self.ev_vals = na.logspace(ev_bounds[0], ev_bounds[1], table.shape[-1])
+        self.ev_vals = np.logspace(ev_bounds[0], ev_bounds[1], table.shape[-1])
         
     def _get_interpolator(self, ev_min, ev_max):
         """
         Integrates from ev_min to ev_max and returns an interpolator.
         """
-        e_is, e_ie = na.digitize([ev_min, ev_max], self.ev_vals)
-        bin_table = na.trapz(self.table[...,e_is-1:e_ie],
+        e_is, e_ie = np.digitize([ev_min, ev_max], self.ev_vals)
+        bin_table = np.trapz(self.table[...,e_is-1:e_ie],
                              2.41799e17*
             (self.ev_vals[e_is:e_ie+1]-self.ev_vals[e_is-1:e_is]),
                              axis=-1)
-        bin_table = na.log10(bin_table.clip(1e-80,bin_table.max()))
+        bin_table = np.log10(bin_table.clip(1e-80,bin_table.max()))
         return BilinearFieldInterpolator(
             bin_table, self.bounds, self.field_names[:],
             truncate=True)
@@ -73,8 +73,8 @@
         interp = self._get_interpolator(ev_min, ev_max)
         name = "XRay_%s_%s" % (ev_min, ev_max)
         def frequency_bin_field(field, data):
-            dd = {'NumberDensity' : na.log10(data["NumberDensity"]),
-                  'Temperature'   : na.log10(data["Temperature"])}
+            dd = {'NumberDensity' : np.log10(data["NumberDensity"]),
+                  'Temperature'   : np.log10(data["Temperature"])}
             return 10**interp(dd)
         add_field(name, function=frequency_bin_field,
                         projection_conversion="cm",
@@ -91,8 +91,8 @@
     e_n_bins, e_min, e_max = e_spec
     T_n_bins, T_min, T_max = T_spec
     # The second one is the fast-varying one
-    rho_is, e_is = na.mgrid[0:rho_n_bins,0:e_n_bins]
-    table = na.zeros((rho_n_bins, T_n_bins, e_n_bins), dtype='float64')
+    rho_is, e_is = np.mgrid[0:rho_n_bins,0:e_n_bins]
+    table = np.zeros((rho_n_bins, T_n_bins, e_n_bins), dtype='float64')
     mylog.info("Parsing Cloudy files")
     for i,ri,ei in zip(range(rho_n_bins*e_n_bins), rho_is.ravel(), e_is.ravel()):
         table[ri,:,ei] = [float(l.split()[-1]) for l in open(pattern%(i+1)) if l[0] != "#"]


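_get_interpolator above integrates the emissivity table over the requested
energy band, using np.digitize to find the bounding samples and np.trapz to
do the integral. A toy version with a random table (made-up shapes; the real
code also folds in a unit-conversion factor before building the bilinear
interpolator):

    import numpy as np

    ev_vals = np.logspace(0, 2, 64)         # energy samples in eV
    table = np.random.random((16, 16, 64))  # (density, T, energy) toy table
    ev_min, ev_max = 5.0, 50.0

    e_is, e_ie = np.digitize([ev_min, ev_max], ev_vals)
    bin_table = np.trapz(table[..., e_is - 1:e_ie],
                         ev_vals[e_is - 1:e_ie], axis=-1)
    bin_table = np.log10(bin_table.clip(1e-80, bin_table.max()))
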
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import h5py
 import math, itertools
 
@@ -66,8 +66,8 @@
         """
         self._pf = pf
         self._data_source = data_source
-        self.star_mass = na.array(star_mass)
-        self.star_creation_time = na.array(star_creation_time)
+        self.star_mass = np.array(star_mass)
+        self.star_creation_time = np.array(star_creation_time)
         self.volume = volume
         self.bin_count = bins
         # Check to make sure we have the right set of information.
@@ -114,13 +114,13 @@
         # Find the oldest stars in units of code time.
         tmin= min(ct_stars)
         # Scale tmin down slightly to avoid numerical edge issues.
-        self.time_bins = na.linspace(tmin*0.99, self._pf.current_time,
+        self.time_bins = np.linspace(tmin*0.99, self._pf.current_time,
             num = self.bin_count + 1)
         # Figure out which bins the stars go into.
-        inds = na.digitize(ct_stars, self.time_bins) - 1
+        inds = np.digitize(ct_stars, self.time_bins) - 1
         # Sum up the stars created in each time bin.
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
-        for index in na.unique(inds):
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
+        for index in np.unique(inds):
             self.mass_bins[index] += sum(mass_stars[inds == index])
         # Calculate the cumulative mass sum over time by forward adding.
         self.cum_mass_bins = self.mass_bins.copy()
@@ -162,13 +162,13 @@
                 (self.time_bins_dt[i] * tc / YEAR) / vol)
             self.Msol.append(self.mass_bins[i])
             self.Msol_cumulative.append(self.cum_mass_bins[i])
-        self.time = na.array(self.time)
-        self.lookback_time = na.array(self.lookback_time)
-        self.redshift = na.array(self.redshift)
-        self.Msol_yr = na.array(self.Msol_yr)
-        self.Msol_yr_vol = na.array(self.Msol_yr_vol)
-        self.Msol = na.array(self.Msol)
-        self.Msol_cumulative = na.array(self.Msol_cumulative)
+        self.time = np.array(self.time)
+        self.lookback_time = np.array(self.lookback_time)
+        self.redshift = np.array(self.redshift)
+        self.Msol_yr = np.array(self.Msol_yr)
+        self.Msol_yr_vol = np.array(self.Msol_yr_vol)
+        self.Msol = np.array(self.Msol)
+        self.Msol_cumulative = np.array(self.Msol_cumulative)
     
     def write_out(self, name="StarFormationRate.out"):
         r"""Write out the star analysis to a text file *name*. The columns are in
@@ -234,10 +234,10 @@
 METAL3 = 0.2828
 METAL4 = 0.6325
 METAL5 = 1.5811
-METALS = na.array([METAL1, METAL2, METAL3, METAL4, METAL5])
+METALS = np.array([METAL1, METAL2, METAL3, METAL4, METAL5])
 
 # Translate METALS array digitize to the table dicts
-MtoD = na.array(["Z0001", "Z0004", "Z004", "Z008", "Z02",  "Z05"])
+MtoD = np.array(["Z0001", "Z0004", "Z004", "Z008", "Z02",  "Z05"])
 
 """
 This spectrum code is based on code from Ken Nagamine, converted from C to Python.
@@ -340,7 +340,7 @@
         >>> spec.calculate_spectrum(data_source=sp, min_age = 1.e6)
         """
         # Initialize values
-        self.final_spec = na.zeros(self.wavelength.size, dtype='float64')
+        self.final_spec = np.zeros(self.wavelength.size, dtype='float64')
         self._data_source = data_source
         if iterable(star_mass):
             self.star_mass = star_mass
@@ -372,7 +372,7 @@
                 """)
                 return None
             if star_metallicity_constant is not None:
-                self.star_metal = na.ones(self.star_mass.size, dtype='float64') * \
+                self.star_metal = np.ones(self.star_mass.size, dtype='float64') * \
                     star_metallicity_constant
             if star_metallicity_fraction is not None:
                 self.star_metal = star_metallicity_fraction
@@ -382,7 +382,7 @@
             self.star_creation_time = ct[ct > 0]
             self.star_mass = self._data_source["ParticleMassMsun"][ct > 0]
             if star_metallicity_constant is not None:
-                self.star_metal = na.ones(self.star_mass.size, dtype='float64') * \
+                self.star_metal = np.ones(self.star_mass.size, dtype='float64') * \
                     star_metallicity_constant
             else:
                 self.star_metal = self._data_source["metallicity_fraction"][ct > 0]
@@ -390,7 +390,7 @@
         self.star_metal /= Zsun
         # Age of star in years.
         dt = (self.time_now - self.star_creation_time * self._pf['Time']) / YEAR
-        dt = na.maximum(dt, 0.0)
+        dt = np.maximum(dt, 0.0)
         # Remove young stars
         sub = dt >= self.min_age
         if not sub.any(): return
@@ -398,18 +398,18 @@
         dt = dt[sub]
         self.star_creation_time = self.star_creation_time[sub]
         # Figure out which METALS bin the star goes into.
-        Mindex = na.digitize(self.star_metal, METALS)
+        Mindex = np.digitize(self.star_metal, METALS)
         # Replace the indices with strings.
         Mname = MtoD[Mindex]
         # Figure out which age bin this star goes into.
-        Aindex = na.digitize(dt, self.age)
+        Aindex = np.digitize(dt, self.age)
         # Ratios used for the interpolation.
         ratio1 = (dt - self.age[Aindex-1]) / (self.age[Aindex] - self.age[Aindex-1])
         ratio2 = (self.age[Aindex] - dt) / (self.age[Aindex] - self.age[Aindex-1])
         # Sort the stars by metallicity and then by age, which should reduce
         # memory access time by a little bit in the loop.
-        indexes = na.arange(self.star_metal.size)
-        sort = na.asarray([indexes[i] for i in na.lexsort([indexes, Aindex, Mname])])
+        indexes = np.arange(self.star_metal.size)
+        sort = np.asarray([indexes[i] for i in np.lexsort([indexes, Aindex, Mname])])
         Mname = Mname[sort]
         Aindex = Aindex[sort]
         ratio1 = ratio1[sort]
@@ -426,15 +426,15 @@
             # Get the one just before the one above.
             flux_1 = self.flux[star[0]][star[1]-1,:]
             # interpolate in log(flux), linear in time.
-            int_flux = star[3] * na.log10(flux_1) + star[2] * na.log10(flux)
+            int_flux = star[3] * np.log10(flux_1) + star[2] * np.log10(flux)
             # Add this flux to the total, weighted by mass.
-            self.final_spec += na.power(10., int_flux) * star[4]
+            self.final_spec += np.power(10., int_flux) * star[4]
             pbar.update(i)
         pbar.finish()    
         
         # Normalize.
-        self.total_mass = na.sum(self.star_mass)
-        self.avg_mass = na.mean(self.star_mass)
+        self.total_mass = np.sum(self.star_mass)
+        self.avg_mass = np.mean(self.star_mass)
         tot_metal = sum(self.star_metal * self.star_mass)
         self.avg_metal = math.log10(tot_metal / self.total_mass / Zsun)
 
@@ -455,25 +455,25 @@
 #             # From the flux array for this metal, and our selection, build
 #             # a new flux array just for the ages of these stars, in the 
 #             # same order as the selection of stars.
-#             this_flux = na.matrix(self.flux[metal_name][A])
+#             this_flux = np.matrix(self.flux[metal_name][A])
 #             # Make one for the last time step for each star in the same fashion
 #             # as above.
-#             this_flux_1 = na.matrix(self.flux[metal_name][A-1])
+#             this_flux_1 = np.matrix(self.flux[metal_name][A-1])
 #             # This is kind of messy, but we're going to multiply this_fluxes
 #             # by the appropriate ratios and add it together to do the 
 #             # interpolation in log(flux) and linear in time.
 #             print r1.size
-#             r1 = na.matrix(r1.tolist()*self.wavelength.size).reshape(self.wavelength.size,r1.size).T
-#             r2 = na.matrix(r2.tolist()*self.wavelength.size).reshape(self.wavelength.size,r2.size).T
+#             r1 = np.matrix(r1.tolist()*self.wavelength.size).reshape(self.wavelength.size,r1.size).T
+#             r2 = np.matrix(r2.tolist()*self.wavelength.size).reshape(self.wavelength.size,r2.size).T
 #             print this_flux_1.shape, r1.shape
-#             int_flux = na.multiply(na.log10(this_flux_1),r1) \
-#                 + na.multiply(na.log10(this_flux),r2)
+#             int_flux = np.multiply(np.log10(this_flux_1),r1) \
+#                 + np.multiply(np.log10(this_flux),r2)
 #             # Weight the fluxes by mass.
-#             sm = na.matrix(sm.tolist()*self.wavelength.size).reshape(self.wavelength.size,sm.size).T
-#             int_flux = na.multiply(na.power(10., int_flux), sm)
+#             sm = np.matrix(sm.tolist()*self.wavelength.size).reshape(self.wavelength.size,sm.size).T
+#             int_flux = np.multiply(np.power(10., int_flux), sm)
 #             # Sum along the columns, converting back to an array, adding
 #             # to the full spectrum.
-#             self.final_spec += na.array(int_flux.sum(axis=0))[0,:]
+#             self.final_spec += np.array(int_flux.sum(axis=0))[0,:]
 
     
     def write_out(self, name="sum_flux.out"):
@@ -518,8 +518,8 @@
         >>> spec.write_out_SED(name = "SED.out", flux_norm = 6000.)
         """
         # find the f_nu closest to flux_norm
-        fn_wavelength = na.argmin(abs(self.wavelength - flux_norm))
-        f_nu = self.final_spec * na.power(self.wavelength, 2.) / LIGHT
+        fn_wavelength = np.argmin(abs(self.wavelength - flux_norm))
+        f_nu = self.final_spec * np.power(self.wavelength, 2.) / LIGHT
         # Normalize f_nu
         self.f_nu = f_nu / f_nu[fn_wavelength]
         # Write out.


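The star-formation-rate calculation above bins stellar mass by creation time
with np.digitize and then accumulates forward for the cumulative mass. A toy
run of that binning (random star data, made-up sizes):

    import numpy as np

    creation_time = np.random.random(1000)  # stand-in code times
    mass = np.ones(1000)                    # stand-in masses in Msun
    current_time = 1.0

    # Scale the oldest time down slightly, as the module does with tmin.
    time_bins = np.linspace(creation_time.min() * 0.99, current_time, 101)
    inds = np.digitize(creation_time, time_bins) - 1
    mass_bins = np.zeros(101, dtype='float64')
    for index in np.unique(inds):
        mass_bins[index] += mass[inds == index].sum()
    cum_mass_bins = np.cumsum(mass_bins)
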
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -32,7 +32,7 @@
     pass
 
 import time
-import numpy as na
+import numpy as np
 import numpy.linalg as linalg
 import collections
 
@@ -78,14 +78,14 @@
 
     """
 
-    fc = na.array(fc)
-    fwidth = na.array(fwidth)
+    fc = np.array(fc)
+    fwidth = np.array(fwidth)
     
     #we must round the dle,dre to the nearest root grid cells
     ile,ire,super_level,ncells_wide= \
             round_ncells_wide(pf.domain_dimensions,fc-fwidth,fc+fwidth,nwide=ncells_wide)
 
-    assert na.all((ile-ire)==(ile-ire)[0])
+    assert np.all((ile-ire)==(ile-ire)[0])
     mylog.info("rounding specified region:")
     mylog.info("from [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fc-fwidth)+tuple(fc+fwidth)))
     mylog.info("to   [%07i %07i %07i]-[%07i %07i %07i]"%(tuple(ile)+tuple(ire)))
@@ -153,7 +153,7 @@
         print "[%03i %03i %03i] "%tuple(dre),
         print " with %i halos"%num_halos
         dle,dre = domain
-        dle, dre = na.array(dle),na.array(dre)
+        dle, dre = np.array(dle),np.array(dre)
         fn = fni 
         fn += "%03i_%03i_%03i-"%tuple(dle)
         fn += "%03i_%03i_%03i"%tuple(dre)
@@ -178,7 +178,7 @@
     dn = pf.domain_dimensions
     for halo in halo_list:
         fle, fre = halo.CoM-frvir*halo.Rvir,halo.CoM+frvir*halo.Rvir
-        dle,dre = na.floor(fle*dn), na.ceil(fre*dn)
+        dle,dre = np.floor(fle*dn), np.ceil(fre*dn)
         dle,dre = tuple(dle.astype('int')),tuple(dre.astype('int'))
         if (dle,dre) in domains.keys():
             domains[(dle,dre)] += halo,
@@ -211,7 +211,7 @@
     del field_data
 
     #first we cast every cell as an oct
-    #ngrids = na.max([g.id for g in pf._grids])
+    #ngrids = np.max([g.id for g in pf._grids])
     grids = {}
     levels_all = {} 
     levels_finest = {}
@@ -220,13 +220,13 @@
         levels_all[l]=0
     pbar = get_pbar("Initializing octs ",len(pf.h.grids))
     for gi,g in enumerate(pf.h.grids):
-        ff = na.array([g[f] for f in fields])
+        ff = np.array([g[f] for f in fields])
         og = amr_utils.OctreeGrid(
                 g.child_index_mask.astype('int32'),
                 ff.astype("float64"),
                 g.LeftEdge.astype("float64"),
                 g.ActiveDimensions.astype("int32"),
-                na.ones(1,dtype="float64")*g.dds[0],
+                np.ones(1,dtype="float64")*g.dds[0],
                 g.Level,
                 g.id)
         grids[g.id] = og
@@ -246,11 +246,11 @@
     #oct_list =  amr_utils.OctreeGridList(grids)
     
     #initialize arrays to be passed to the recursion algo
-    o_length = na.sum(levels_all.values())
-    r_length = na.sum(levels_all.values())
-    output   = na.zeros((o_length,len(fields)), dtype='float64')
-    refined  = na.zeros(r_length, dtype='int32')
-    levels   = na.zeros(r_length, dtype='int32')
+    o_length = np.sum(levels_all.values())
+    r_length = np.sum(levels_all.values())
+    output   = np.zeros((o_length,len(fields)), dtype='float64')
+    refined  = np.zeros(r_length, dtype='int32')
+    levels   = np.zeros(r_length, dtype='int32')
     pos = position()
     hs       = hilbert_state()
     start_time = time.time()
@@ -332,7 +332,7 @@
         #calculate the floating point LE of the children
         #then translate onto the subgrid integer index 
         parent_fle  = grid.left_edges + cell_index*grid.dx
-        subgrid_ile = na.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
+        subgrid_ile = np.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
         for i, (vertex,hilbert_child) in enumerate(hilbert):
             #vertex is a combination of three 0s and 1s to 
             #denote each of the 8 octs
@@ -340,7 +340,7 @@
                 subgrid = grid #we don't actually descend if we're a superlevel
                 child_ile = cell_index + vertex*2**(-level)
             else:
-                child_ile = subgrid_ile+na.array(vertex)
+                child_ile = subgrid_ile+np.array(vertex)
                 child_ile = child_ile.astype('int')
             RecurseOctreeDepthFirstHilbert(child_ile,pos,
                     subgrid,hilbert_child,output,refined,levels,grids,level+1,
@@ -381,17 +381,17 @@
     col_list.append(pyfits.Column("mass_metals", format='D',
                     array=fd['MetalMass'], unit="Msun"))
     # col_list.append(pyfits.Column("mass_stars", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="Msun"))
     # col_list.append(pyfits.Column("mass_stellar_metals", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="Msun"))
     # col_list.append(pyfits.Column("age_m", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="yr*Msun"))
     # col_list.append(pyfits.Column("age_l", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="yr*Msun"))
     # col_list.append(pyfits.Column("L_bol", format='D',
-    #                 array=na.zeros(size,dtype='D')))
+    #                 array=np.zeros(size,dtype='D')))
     # col_list.append(pyfits.Column("L_lambda", format='D',
-    #                 array=na.zeros(size,dtype='D')))
+    #                 array=np.zeros(size,dtype='D')))
     # The units for gas_temp are really K*Msun. For older Sunrise versions
     # you must set the unit to just K  
     col_list.append(pyfits.Column("gas_temp_m", format='D',
@@ -402,7 +402,7 @@
                     array=fd['CellVolumeCode'].astype('float64')*pf['kpc']**3.0,
                     unit="kpc^3"))
     col_list.append(pyfits.Column("SFR", format='D',
-                    array=na.zeros(size, dtype='D')))
+                    array=np.zeros(size, dtype='D')))
     cols = pyfits.ColDefs(col_list)
     mg_table = pyfits.new_table(cols)
     mg_table.header.update("M_g_tot", tm)
@@ -411,7 +411,7 @@
     mg_table.name = "GRIDDATA"
 
     # Add a dummy Primary; might be a better way to do this!
-    col_list = [pyfits.Column("dummy", format="F", array=na.zeros(1, dtype='float32'))]
+    col_list = [pyfits.Column("dummy", format="F", array=np.zeros(1, dtype='float32'))]
     cols = pyfits.ColDefs(col_list)
     md_table = pyfits.new_table(cols)
     md_table.header.update("snaptime", pf.current_time*pf['years'])
@@ -437,12 +437,12 @@
 
 def round_ncells_wide(dds,fle,fre,nwide=None):
     fc = (fle+fre)/2.0
-    assert na.all(fle < fc)
-    assert na.all(fre > fc)
-    ic = na.rint(fc*dds) #nearest vertex to the center
+    assert np.all(fle < fc)
+    assert np.all(fre > fc)
+    ic = np.rint(fc*dds) #nearest vertex to the center
     ile,ire = ic.astype('int'),ic.astype('int')
     cfle,cfre = fc.copy(),fc.copy()
-    idx = na.array([0,0,0]) #just a random non-equal array
+    idx = np.array([0,0,0]) #just a random non-equal array
     width = 0.0
     if nwide is None:
         #expand until borders are included and the cell span is equal along all axes
@@ -450,41 +450,41 @@
         idxq,out=False,True
         while not out or not idxq:
             cfle,cfre = fc-width, fc+width
-            ile = na.rint(cfle*dds).astype('int')
-            ire = na.rint(cfre*dds).astype('int')
+            ile = np.rint(cfle*dds).astype('int')
+            ire = np.rint(cfre*dds).astype('int')
             idx = ire-ile
             width += 0.1/dds
             #quit if idxq is true:
-            idxq = idx[0]>0 and na.all(idx==idx[0])
-            out  = na.all(fle>cfle) and na.all(fre<cfre) 
+            idxq = idx[0]>0 and np.all(idx==idx[0])
+            out  = np.all(fle>cfle) and np.all(fre<cfre) 
             assert width[0] < 1.1 #can't go larger than the simulation volume
         nwide = idx[0]
     else:
         #expand until we span nwide cells
-        while not na.all(idx==nwide):
-            assert na.any(idx<=nwide)
+        while not np.all(idx==nwide):
+            assert np.any(idx<=nwide)
             cfle,cfre = fc-width, fc+width
-            ile = na.rint(cfle*dds).astype('int')
-            ire = na.rint(cfre*dds).astype('int')
+            ile = np.rint(cfle*dds).astype('int')
+            ire = np.rint(cfre*dds).astype('int')
             idx = ire-ile
             width += 1e-2*1.0/dds
-    assert na.all(idx==nwide)
+    assert np.all(idx==nwide)
     assert idx[0]>0
-    maxlevel = -na.rint(na.log2(nwide)).astype('int')
-    assert abs(na.log2(nwide)-na.rint(na.log2(nwide)))<1e-5 #nwide should be a power of 2
+    maxlevel = -np.rint(np.log2(nwide)).astype('int')
+    assert abs(np.log2(nwide)-np.rint(np.log2(nwide)))<1e-5 #nwide should be a power of 2
     return ile,ire,maxlevel,nwide
 
 def round_nearest_edge(pf,fle,fre):
     dds = pf.domain_dimensions
-    ile = na.floor(fle*dds).astype('int')
-    ire = na.ceil(fre*dds).astype('int') 
+    ile = np.floor(fle*dds).astype('int')
+    ire = np.ceil(fre*dds).astype('int') 
     
     #this is the number of cells the super octree needs to expand to
     #must round to the nearest power of 2
-    width = na.max(ire-ile)
+    width = np.max(ire-ile)
     width = nearest_power(width)
     
-    maxlevel = -na.rint(na.log2(width)).astype('int')
+    maxlevel = -np.rint(np.log2(width)).astype('int')
     return ile,ire,maxlevel
 
 def prepare_star_particles(pf,star_type,pos=None,vel=None, age=None,
@@ -497,14 +497,14 @@
         dd = pf.h.all_data()
     idx = dd["particle_type"] == star_type
     if pos is None:
-        pos = na.array([dd["particle_position_%s" % ax]
+        pos = np.array([dd["particle_position_%s" % ax]
                         for ax in 'xyz']).transpose()
-    idx = idx & na.all(pos>fle,axis=1) & na.all(pos<fre,axis=1)
+    idx = idx & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
     pos = pos[idx]*pf['kpc'] #unitary units -> kpc
     if age is None:
         age = dd["particle_age"][idx]*pf['years'] # seconds->years
     if vel is None:
-        vel = na.array([dd["particle_velocity_%s" % ax][idx]
+        vel = np.array([dd["particle_velocity_%s" % ax][idx]
                         for ax in 'xyz']).transpose()
         # Velocity is cm/s, we want it to be kpc/yr
         #vel *= (pf["kpc"]/pf["cm"]) / (365*24*3600.)
@@ -525,8 +525,8 @@
     formation_time = pf.current_time*pf['years']-age
     #create every column
     col_list = []
-    col_list.append(pyfits.Column("ID", format="J", array=na.arange(current_mass.size).astype('int32')))
-    col_list.append(pyfits.Column("parent_ID", format="J", array=na.arange(current_mass.size).astype('int32')))
+    col_list.append(pyfits.Column("ID", format="J", array=np.arange(current_mass.size).astype('int32')))
+    col_list.append(pyfits.Column("parent_ID", format="J", array=np.arange(current_mass.size).astype('int32')))
     col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
     col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
     col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
@@ -540,7 +540,7 @@
     col_list.append(pyfits.Column("metallicity", format="D",
         array=metallicity,unit="Msun")) 
     #col_list.append(pyfits.Column("L_bol", format="D",
-    #    array=na.zeros(current_mass.size)))
+    #    array=np.zeros(current_mass.size)))
     
     #make the table
     cols = pyfits.ColDefs(col_list)
@@ -570,7 +570,7 @@
                 / data["dynamical_time"])
         xv2 = ((data.pf["InitialTime"] + dtForSFR - data["creation_time"])
                 / data["dynamical_time"])
-        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*na.exp(-xv1)))
+        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*np.exp(-xv1)))
         minitial = data["ParticleMassMsun"] / denom
         return minitial
 
@@ -698,14 +698,14 @@
     camera_positions in Sunrise.
     """
 
-    sim_center = na.array(sim_center)
+    sim_center = np.array(sim_center)
     if sim_sphere_radius is None:
         sim_sphere_radius = 10.0/pf['kpc']
     if sim_axis_short is None:
         if dd is None:
             dd = pf.h.all_data()
-        pos = na.array([dd["particle_position_%s"%i] for i in "xyz"]).T
-        idx = na.sqrt(na.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
+        pos = np.array([dd["particle_position_%s"%i] for i in "xyz"]).T
+        idx = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
         mas = dd["particle_mass"]
         pos = pos[idx]
         mas = mas[idx]
@@ -722,14 +722,14 @@
     if scene_distance is  None:
         scene_distance = 1e4/pf['kpc'] #this is how far the camera is from the target
     if scene_fov is None:
-        radii = na.sqrt(na.sum((pos-sim_center)**2.0,axis=1))
+        radii = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))
         #idx= radii < sim_halo_radius*0.10
         #radii = radii[idx]
         #mass  = mas[idx] #copying mass into mas
-        si = na.argsort(radii)
+        si = np.argsort(radii)
         radii = radii[si]
         mass  = mas[si]
-        idx, = na.where(na.cumsum(mass)>mass.sum()/2.0)
+        idx, = np.where(np.cumsum(mass)>mass.sum()/2.0)
         re = radii[idx[0]]
         scene_fov = 5*re
         scene_fov = max(scene_fov,3.0/pf['kpc']) #min size is 3kpc
@@ -745,11 +745,11 @@
     
     #rotate the camera
     if scene_rot :
-        irotation = na.eye(3)
-    sunrise_pos = matmul(irotation,na.array(scene_position)*scene_distance) #do NOT include sim center
+        irotation = np.eye(3)
+    sunrise_pos = matmul(irotation,np.array(scene_position)*scene_distance) #do NOT include sim center
     sunrise_up  = matmul(irotation,scene_up)
     sunrise_direction = -sunrise_pos
-    sunrise_afov = 2.0*na.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
+    sunrise_afov = 2.0*np.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
 
     #change to physical kpc
     sunrise_pos *= pf['kpc']
@@ -763,11 +763,11 @@
     use this to multiply two matrices, it will think that you're
     trying to multiply by a set of vectors and all hell will break
     loose."""    
-    assert type(v) is not na.matrix
-    v = na.asarray(v)
-    m, vs = [na.asmatrix(a) for a in (m, v)]
+    assert type(v) is not np.matrix
+    v = np.asarray(v)
+    m, vs = [np.asmatrix(a) for a in (m, v)]
 
-    result = na.asarray(na.transpose(m * na.transpose(vs)))    
+    result = np.asarray(np.transpose(m * np.transpose(vs)))    
     if len(v.shape) == 1:
         return result[0]
     return result
@@ -775,14 +775,14 @@
 
 def mag(vs):
     """Compute the norms of a set of vectors or a single vector."""
-    vs = na.asarray(vs)
+    vs = np.asarray(vs)
     if len(vs.shape) == 1:
-        return na.sqrt( (vs**2).sum() )
-    return na.sqrt( (vs**2).sum(axis=1) )
+        return np.sqrt( (vs**2).sum() )
+    return np.sqrt( (vs**2).sum(axis=1) )
 
 def mag2(vs):
     """Compute the squared norms of a set of vectors or a single vector."""
-    vs = na.asarray(vs)
+    vs = np.asarray(vs)
     if len(vs.shape) == 1:
         return (vs**2).sum()
     return (vs**2).sum(axis=1)
@@ -791,25 +791,25 @@
 def position_moment(rs, ms=None, axes=None):
     """Find second position moment tensor.
     If axes is specified, weight by the elliptical radius (Allgood 2005)"""
-    rs = na.asarray(rs)
+    rs = np.asarray(rs)
     Npart, N = rs.shape
-    if ms is None: ms = na.ones(Npart)
-    else: ms = na.asarray(ms)    
+    if ms is None: ms = np.ones(Npart)
+    else: ms = np.asarray(ms)    
     if axes is not None:
-        axes = na.asarray(axes,dtype=float64)
+        axes = np.asarray(axes,dtype=np.float64)
         axes = axes/axes.max()
         norms2 = mag2(rs/axes)
     else:
-        norms2 = na.ones(Npart)
+        norms2 = np.ones(Npart)
     M = ms.sum()
-    result = na.zeros((N,N))
+    result = np.zeros((N,N))
     # matrix is symmetric, so only compute half of it then fill in the
     # other half
     for i in range(N):
         for j in range(i+1):
             result[i,j] = ( rs[:,i] * rs[:,j] * ms / norms2).sum() / M
         
-    result = result + result.transpose() - na.identity(N)*result
+    result = result + result.transpose() - np.identity(N)*result
     return result
     
 
@@ -826,7 +826,7 @@
     make the long axis line up with the x axis and the short axis line
     up with the y (z) axis for the 2 (3) dimensional case."""
     # Make sure the vectors are normalized and orthogonal
-    mag = lambda x: na.sqrt(na.sum(x**2.0))
+    mag = lambda x: np.sqrt(np.sum(x**2.0))
     v = v/mag(v)
     w = w/mag(w)    
     if check:
@@ -843,7 +843,7 @@
     w_prime = euler_passive(w,phi,theta,0.)
     if w_prime[0] < 0: w_prime = -w_prime
     # Now last Euler angle should just be this:
-    psi = na.arctan2(w_prime[1],w_prime[0])
+    psi = np.arctan2(w_prime[1],w_prime[0])
     return phi, theta, psi
 
 def find_euler_phi_theta(v):
@@ -851,19 +851,19 @@
     direction"""
     # Make sure the vector is normalized
     v = v/mag(v)
-    theta = na.arccos(v[2])
-    phi = na.arctan2(v[0],-v[1])
+    theta = np.arccos(v[2])
+    phi = np.arctan2(v[0],-v[1])
     return phi,theta
 
 def euler_matrix(phi, the, psi):
     """Make an Euler transformation matrix"""
-    cpsi=na.cos(psi)
-    spsi=na.sin(psi)
-    cphi=na.cos(phi)
-    sphi=na.sin(phi)
-    cthe=na.cos(the)
-    sthe=na.sin(the)
-    m = na.mat(na.zeros((3,3)))
+    cpsi=np.cos(psi)
+    spsi=np.sin(psi)
+    cphi=np.cos(phi)
+    sphi=np.sin(phi)
+    cthe=np.cos(the)
+    sthe=np.sin(the)
+    m = np.mat(np.zeros((3,3)))
     m[0,0] = cpsi*cphi - cthe*sphi*spsi
     m[0,1] = cpsi*sphi + cthe*cphi*spsi
     m[0,2] = spsi*sthe
@@ -912,9 +912,9 @@
 cameraset_ring = collections.OrderedDict()
 
 segments = 20
-for angle in na.linspace(0,360,segments):
-    pos = [na.cos(angle),0.,na.sin(angle)]
-    vc  = [na.cos(90-angle),0.,na.sin(90-angle)] 
+for angle in np.linspace(0,360,segments):
+    pos = [np.cos(np.radians(angle)),0.,np.sin(np.radians(angle))]
+    vc  = [np.cos(np.radians(90-angle)),0.,np.sin(np.radians(90-angle))]
     cameraset_ring['%02i'%angle]=(pos,vc)
             
 


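The matmul and mag helpers above are thin NumPy conveniences for applying a
rotation to a single vector or to a set of row vectors at once.  A minimal
standalone sketch of the same pattern (illustrative only, not part of the yt
API) is:

    import numpy as np

    def matmul(m, v):
        # Apply matrix m to a single vector, or to each row of a set.
        m = np.asarray(m, dtype="float64")
        v = np.asarray(v, dtype="float64")
        return np.dot(v, m.T)

    def mag(vs):
        # Norm of a single vector, or of each row of a set of vectors.
        vs = np.asarray(vs, dtype="float64")
        if vs.ndim == 1:
            return np.sqrt((vs**2).sum())
        return np.sqrt((vs**2).sum(axis=1))

    # Rotate by 90 degrees about the z axis; norms are preserved.
    t = np.pi/2
    rz = np.array([[np.cos(t), -np.sin(t), 0.],
                   [np.sin(t),  np.cos(t), 0.],
                   [0.,         0.,        1.]])
    print matmul(rz, [1., 0., 0.])                       # ~[0, 1, 0]
    print mag(matmul(rz, [[1., 0., 0.], [0., 2., 0.]]))  # [1, 2]
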
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -30,7 +30,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import ParallelAnalysisInterface, parallel_blocking_call, parallel_root_only
 
 try:
-    from yt.utilities.kdtree import *
+    from yt.utilities.kdtree.api import *
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 
@@ -144,10 +144,10 @@
             length_range[0] = math.sqrt(3) * self.pf.h.get_smallest_dx()
         # Make the list of ruler lengths.
         if length_type == "lin":
-            self.lengths = na.linspace(length_range[0], length_range[1],
+            self.lengths = np.linspace(length_range[0], length_range[1],
                 length_number)
         elif length_type == "log":
-            self.lengths = na.logspace(math.log10(length_range[0]),
+            self.lengths = np.logspace(math.log10(length_range[0]),
                 math.log10(length_range[1]), length_number)
         else:
             # Something went wrong.
@@ -177,7 +177,7 @@
                 right_edge + self.lengths[-1], rank_ratio=self.vol_ratio)
         mylog.info("LE %s RE %s %s" % (str(self.LE), str(self.RE), str(self.ds)))
         self.width = self.ds.right_edge - self.ds.left_edge
-        self.mt = na.random.mtrand.RandomState(seed = 1234 * self.mine + salt)
+        self.mt = np.random.mtrand.RandomState(seed = 1234 * self.mine + salt)
     
     def add_function(self, function, out_labels, sqrt, corr_norm=None):
         r"""Add a function to the list that will be evaluated at the
@@ -265,7 +265,7 @@
                 mylog.info("Doing length %1.5e" % length)
             # Things stop when this value below equals total_values.
             self.generated_points = 0
-            self.gen_array = na.zeros(self.size, dtype='int64')
+            self.gen_array = np.zeros(self.size, dtype='int64')
             self.comm_cycle_count = 0
             self.final_comm_cycle_count = 0
             self.sent_done = False
@@ -280,7 +280,7 @@
                 t1 = time.time()
                 t_waiting += (t1-t0)
                 if (self.recv_points < -1.).any() or (self.recv_points > 1.).any(): # or \
-                        #(na.abs(na.log10(na.abs(self.recv_points))) > 20).any():
+                        #(np.abs(np.log10(np.abs(self.recv_points))) > 20).any():
                     raise ValueError("self.recv_points is no good!")
                 self.points = self.recv_points.copy()
                 self.fields_vals = self.recv_fields_vals.copy()
@@ -312,7 +312,7 @@
         xp = self.ds["x"]
         yp = self.ds["y"]
         zp = self.ds["z"]
-        fKD.pos = na.asfortranarray(na.empty((3,xp.size), dtype='float64'))
+        fKD.pos = np.asfortranarray(np.empty((3,xp.size), dtype='float64'))
         # Normalize the grid points only within the kdtree.
         fKD.pos[0, :] = xp[:] / self.period[0]
         fKD.pos[1, :] = yp[:] / self.period[1]
@@ -332,8 +332,8 @@
         xp = self.ds["x"]
         yp = self.ds["y"]
         zp = self.ds["z"]
-        self.sizes = [na.unique(xp).size, na.unique(yp).size, na.unique(zp).size]        
-        self.sort = na.lexsort([zp, yp, xp])
+        self.sizes = [np.unique(xp).size, np.unique(yp).size, np.unique(zp).size]        
+        self.sort = np.lexsort([zp, yp, xp])
         del xp, yp, zp
         self.ds.clear_data()
     
@@ -341,7 +341,7 @@
         """
         Builds an array to store the field values array.
         """
-        self.fields_vals = na.empty((self.comm_size, len(self.fields)*2), \
+        self.fields_vals = np.empty((self.comm_size, len(self.fields)*2), \
             dtype='float64')
         # At the same time build a dict to label the columns.
         self.fields_columns = {}
@@ -353,7 +353,7 @@
         Initializes the array that contains the random points as all negatives
         to start with.
         """
-        self.points = na.ones((self.comm_size, 6), dtype='float64') * -1.0
+        self.points = np.ones((self.comm_size, 6), dtype='float64') * -1.0
     
     def _setup_done_hooks_on_root(self):
         """
@@ -364,7 +364,7 @@
         self.recv_done = {}
         for task in xrange(self.size):
             if task == self.mine: continue
-            self.recv_done[task] = na.zeros(1, dtype='int64')
+            self.recv_done[task] = np.zeros(1, dtype='int64')
             self.done_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_done[task], \
                 task, tag=15))
     
@@ -376,13 +376,13 @@
         if self.sent_done: return
         if self.mine !=0:
             # I send when I *think* things should finish.
-            self.send_done = na.ones(1, dtype='int64') * \
+            self.send_done = np.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
             self.done_hooks.append(self.comm.mpi_nonblocking_send(self.send_done, \
                     0, tag=15))
         else:
             # As root, I need to mark myself!
-            self.recv_done[0] = na.ones(1, dtype='int64') * \
+            self.recv_done[0] = np.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
         self.sent_done = True
     
@@ -416,10 +416,10 @@
         Creates the recv buffers and calls a non-blocking MPI receive pointing
         to the left-hand neighbor.
         """
-        self.recv_points = na.ones((self.comm_size, 6), dtype='float64') * -1.
-        self.recv_fields_vals = na.zeros((self.comm_size, len(self.fields)*2), \
+        self.recv_points = np.ones((self.comm_size, 6), dtype='float64') * -1.
+        self.recv_fields_vals = np.zeros((self.comm_size, len(self.fields)*2), \
             dtype='float64')
-        self.recv_gen_array = na.zeros(self.size, dtype='int64')
+        self.recv_gen_array = np.zeros(self.size, dtype='int64')
         self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_points, \
             (self.mine-1)%self.size, tag=10))
         self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_fields_vals, \
@@ -470,7 +470,7 @@
         Picks out size random pairs separated by length *length*.
         """
         # First make random points inside this subvolume.
-        r1 = na.empty((size,3), dtype='float64')
+        r1 = np.empty((size,3), dtype='float64')
         for dim in range(3):
             r1[:,dim] = self.mt.uniform(low=self.ds.left_edge[dim],
                 high=self.ds.right_edge[dim], size=size)
@@ -480,15 +480,15 @@
         # but phi and theta are switched to the Physics convention.
         if self.constant_phi is None:
             phi = self.mt.uniform(low=0, high=2.*math.pi, size=size)
-        else: phi = self.constant_phi * na.ones(size, dtype='float64')
+        else: phi = self.constant_phi * np.ones(size, dtype='float64')
         if self.constant_theta is None:
             v = self.mt.uniform(low=0., high=1, size=size)
-            theta = na.arccos(2 * v - 1)
-        else: theta = self.constant_theta * na.ones(size, dtype='float64')
-        r2 = na.empty((size,3), dtype='float64')
-        r2[:,0] = r1[:,0] + length * na.cos(phi) * na.sin(theta)
-        r2[:,1] = r1[:,1] + length * na.sin(phi) * na.sin(theta)
-        r2[:,2] = r1[:,2] + length * na.cos(theta)
+            theta = np.arccos(2 * v - 1)
+        else: theta = self.constant_theta * np.ones(size, dtype='float64')
+        r2 = np.empty((size,3), dtype='float64')
+        r2[:,0] = r1[:,0] + length * np.cos(phi) * np.sin(theta)
+        r2[:,1] = r1[:,1] + length * np.sin(phi) * np.sin(theta)
+        r2[:,2] = r1[:,2] + length * np.cos(theta)
         # Wrap periodically so the pair stays inside the (full) volume.
         r2 %= self.period
         return (r1, r2)
@@ -508,7 +508,7 @@
             points[:, 1] = points[:, 1] / self.period[1]
             points[:, 2] = points[:, 2] / self.period[2]
             fKD.qv_many = points.T
-            fKD.nn_tags = na.asfortranarray(na.empty((1, points.shape[0]), dtype='int64'))
+            fKD.nn_tags = np.asfortranarray(np.empty((1, points.shape[0]), dtype='int64'))
             find_many_nn_nearest_neighbors()
             # The -1 is for fortran counting.
             n = fKD.nn_tags[0,:] - 1
@@ -521,7 +521,7 @@
         """
         # First find the grid data index field.
         indices = self._find_nearest_cell(points)
-        results = na.empty((len(indices), len(self.fields)), dtype='float64')
+        results = np.empty((len(indices), len(self.fields)), dtype='float64')
         # Put the field values into the columns of results.
         for field in self.fields:
             col = self.fields_columns[field]
@@ -547,7 +547,7 @@
                 self.generated_points += size
                 # If size != select.sum(), we need to pad the end of new_r1/r2
                 # which is what is effectively happening below.
-                newpoints = na.ones((ssum, 6), dtype='float64') * -1.
+                newpoints = np.ones((ssum, 6), dtype='float64') * -1.
                 newpoints[:size,:3] = new_r1
                 newpoints[:size,3:] = new_r2
                 # Now we insert them into self.points.
@@ -564,9 +564,9 @@
             # or I don't need to make any new points and I'm just processing the
             # array. Start by finding the indices of the points I own.
             self.points.shape = (self.comm_size*2, 3) # Doesn't make a copy - fast!
-            select = na.bitwise_or((self.points < self.ds.left_edge).any(axis=1),
+            select = np.bitwise_or((self.points < self.ds.left_edge).any(axis=1),
                 (self.points >= self.ds.right_edge).any(axis=1))
-            select = na.invert(select)
+            select = np.invert(select)
             mypoints = self.points[select]
             if mypoints.size > 0:
                 # Get the fields values.
@@ -583,19 +583,19 @@
             # To run the functions, what is key is that the
             # second point in the pair is ours.
             second_points = self.points[:,3:]
-            select = na.bitwise_or((second_points < self.ds.left_edge).any(axis=1),
+            select = np.bitwise_or((second_points < self.ds.left_edge).any(axis=1),
                 (second_points >= self.ds.right_edge).any(axis=1))
-            select = na.invert(select)
+            select = np.invert(select)
             if select.any():
                 points_to_eval = self.points[select]
                 fields_to_eval = self.fields_vals[select]
                 
                 # Find the normal vector between our points.
-                vec = na.abs(points_to_eval[:,:3] - points_to_eval[:,3:])
-                norm = na.sqrt(na.sum(na.multiply(vec,vec), axis=1))
+                vec = np.abs(points_to_eval[:,:3] - points_to_eval[:,3:])
+                norm = np.sqrt(np.sum(np.multiply(vec,vec), axis=1))
                 # I wish there was a better way to do this, but I can't find it.
                 for i, n in enumerate(norm):
-                    vec[i] = na.divide(vec[i], n)
+                    vec[i] = np.divide(vec[i], n)
                 
                 # Now evaluate the functions.
                 for fcn_set in self._fsets:
@@ -604,7 +604,7 @@
                     fcn_set._bin_results(length, fcn_results)
                 
                 # Now clear the buffers at the processed points.
-                self.points[select] = na.array([-1.]*6, dtype='float64')
+                self.points[select] = np.array([-1.]*6, dtype='float64')
                 
             else:
                 # We didn't clear any points, so we should move on with our
@@ -712,8 +712,8 @@
         self.corr_norm = corr_norm # A number used to normalize a correlation function.
         # These below are used to track how many times the function returns
         # unbinned results.
-        self.too_low = na.zeros(len(self.out_labels), dtype='int32')
-        self.too_high = na.zeros(len(self.out_labels), dtype='int32')
+        self.too_low = np.zeros(len(self.out_labels), dtype='int32')
+        self.too_high = np.zeros(len(self.out_labels), dtype='int32')
         
     def set_pdf_params(self, bin_type="lin", bin_number=1000, bin_range=None):
         r"""Set the parameters used to build the Probability Distribution Function
@@ -772,14 +772,14 @@
             bin_type, bin_number = [bin_type], [bin_number]
             bin_range = [bin_range]
         self.bin_type = bin_type
-        self.bin_number = na.array(bin_number) - 1
+        self.bin_number = np.array(bin_number) - 1
         self.dims = range(len(bin_type))
         # Create the dict that stores the arrays to store the bin hits, and
         # the arrays themselves.
         self.length_bin_hits = {}
         for length in self.tpf.lengths:
             # It's easier to index flattened, but will be unflattened later.
-            self.length_bin_hits[length] = na.zeros(self.bin_number,
+            self.length_bin_hits[length] = np.zeros(self.bin_number,
                 dtype='int64').flatten()
         # Create the bin edges for each dimension.
         # self.bins is indexed by dimension
@@ -792,10 +792,10 @@
                 raise ValueError("bin_range[1] must be larger than bin_range[0]")
             # Make the edges for this dimension.
             if bin_type[dim] == "lin":
-                self.bin_edges[dim] = na.linspace(bin_range[dim][0], bin_range[dim][1],
+                self.bin_edges[dim] = np.linspace(bin_range[dim][0], bin_range[dim][1],
                     bin_number[dim])
             elif bin_type[dim] == "log":
-                self.bin_edges[dim] = na.logspace(math.log10(bin_range[dim][0]),
+                self.bin_edges[dim] = np.logspace(math.log10(bin_range[dim][0]),
                     math.log10(bin_range[dim][1]), bin_number[dim])
             else:
                 raise SyntaxError("bin_type must be either \"lin\" or \"log\".")
@@ -822,32 +822,32 @@
         is flattened, so we need to figure out the offset for this hit by
         factoring the sizes of the other dimensions.
         """
-        hit_bin = na.zeros(results.shape[0], dtype='int64')
+        hit_bin = np.zeros(results.shape[0], dtype='int64')
-        good = na.ones(results.shape[0], dtype='bool')
+        good = np.ones(results.shape[0], dtype='bool')
         for dim in range(len(self.out_labels)):
             # Reset the flattened-offset factor for each output dimension.
             multi = 1
             for d1 in range(dim):
                 multi *= self.bin_edges[d1].size
             if dim == 0 and len(self.out_labels)==1:
                 try:
-                    digi = na.digitize(results, self.bin_edges[dim])
+                    digi = np.digitize(results, self.bin_edges[dim])
                 except ValueError:
                     # The user probably did something like 
                     # return a * b rather than
                     # return a[0] * b[0], which will only happen
                     # for single field functions.
-                    digi = na.digitize(results[0], self.bin_edges[dim])
+                    digi = np.digitize(results[0], self.bin_edges[dim])
             else:
-                digi = na.digitize(results[:,dim], self.bin_edges[dim])
+                digi = np.digitize(results[:,dim], self.bin_edges[dim])
             too_low = (digi == 0)
             too_high = (digi == self.bin_edges[dim].size)
             self.too_low[dim] += (too_low).sum()
             self.too_high[dim] += (too_high).sum()
-            newgood = na.bitwise_and(na.invert(too_low), na.invert(too_high))
-            good = na.bitwise_and(good, newgood)
-            hit_bin += na.multiply((digi - 1), multi)
-        digi_bins = na.arange(self.length_bin_hits[length].size+1)
-        hist, digi_bins = na.histogram(hit_bin[good], digi_bins)
+            newgood = np.bitwise_and(np.invert(too_low), np.invert(too_high))
+            good = np.bitwise_and(good, newgood)
+            hit_bin += np.multiply((digi - 1), multi)
+        digi_bins = np.arange(self.length_bin_hits[length].size+1)
+        hist, digi_bins = np.histogram(hit_bin[good], digi_bins)
         self.length_bin_hits[length] += hist
 
     def _dim_sum(self, a, dim):
@@ -855,11 +855,11 @@
         Given a multidimensional array a, this finds the sum over all the
         elements leaving the dimension dim untouched.
         """
-        dims = na.arange(len(a.shape))
-        dims = na.flipud(dims)
+        dims = np.arange(len(a.shape))
+        dims = np.flipud(dims)
         gt_dims = dims[dims > dim]
         lt_dims = dims[dims < dim]
-        iter_dims = na.concatenate((gt_dims, lt_dims))
+        iter_dims = np.concatenate((gt_dims, lt_dims))
         for this_dim in iter_dims:
             a = a.sum(axis=this_dim)
         return a
@@ -882,6 +882,6 @@
         """
         xi = {}
         for length in self.tpf.lengths:
-            xi[length] = -1 + na.sum(self.length_bin_hits[length] * \
+            xi[length] = -1 + np.sum(self.length_bin_hits[length] * \
                 self.bin_edges[0][:-1]) / self.corr_norm
         return xi


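The pair generator above places the second point of each pair at a fixed
separation from the first, in an isotropically random direction (phi uniform
on [0, 2*pi), cos(theta) uniform on [-1, 1]).  A standalone sketch of that
sampling, assuming a unit periodic box:

    import numpy as np

    mt = np.random.mtrand.RandomState(seed=1234)
    size, length = 1000, 0.05
    period = np.array([1., 1., 1.])

    # First points of each pair, uniform in the box.
    r1 = mt.uniform(low=0., high=1., size=(size, 3))
    # Isotropic directions for the second points.
    phi = mt.uniform(low=0., high=2.*np.pi, size=size)
    theta = np.arccos(2.*mt.uniform(low=0., high=1., size=size) - 1.)
    r2 = np.empty((size, 3), dtype="float64")
    r2[:,0] = r1[:,0] + length*np.cos(phi)*np.sin(theta)
    r2[:,1] = r1[:,1] + length*np.sin(phi)*np.sin(theta)
    r2[:,2] = r1[:,2] + length*np.cos(theta)
    r2 %= period  # wrap back into the periodic volume
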
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/arraytypes.py
--- a/yt/arraytypes.py
+++ b/yt/arraytypes.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import numpy.core.records as rec
 
 # Now define convenience functions
@@ -41,5 +41,5 @@
     """
     blanks = []
     for atype in desc['formats']:
-        blanks.append(na.zeros(elements, dtype=atype))
+        blanks.append(np.zeros(elements, dtype=atype))
     return rec.fromarrays(blanks, **desc)


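blankRecordArray above just allocates zeroed columns and binds them into a
record array; a usage sketch (the descriptor shown is illustrative):

    import numpy as np
    import numpy.core.records as rec

    desc = {'names':   ['mass', 'radius', 'level'],
            'formats': ['float64', 'float64', 'int32']}
    blanks = [np.zeros(8, dtype=atype) for atype in desc['formats']]
    table = rec.fromarrays(blanks, **desc)
    print table['mass']   # eight float64 zeros
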
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -38,6 +38,7 @@
     inline = 'False',
     numthreads = '-1',
     __withinreason = 'False',
+    __withintesting = 'False',
     __parallel = 'False',
     __global_parallel_rank = '0',
     __global_parallel_size = '1',
@@ -51,7 +52,9 @@
     pluginfilename = 'my_plugins.py',
     parallel_traceback = 'False',
     pasteboard_repo = '',
+    reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
+    test_data_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',


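The new defaults above (__withintesting, reconstruct_hierarchy, test_data_dir)
are read back through yt's ConfigParser-based ytcfg object; assuming the usual
accessors, a consumer looks something like:

    from yt.config import ytcfg

    # The strings above are defaults; ~/.yt/config or environment
    # variables can override them.
    test_dir = ytcfg.get("yt", "test_data_dir")
    if ytcfg.getboolean("yt", "__withintesting"):
        print "in testing mode, data dir = %s" % test_dir
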
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -24,7 +24,7 @@
 """
 
 import glob
-import numpy as na
+import numpy as np
 import os, os.path, inspect, types
 from functools import wraps
 


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -1,4 +1,4 @@
-"""
+""" 
 API for yt.data_objects
 
 Author: Matthew Turk <matthewturk at gmail.com>
@@ -62,6 +62,9 @@
     quantity_info, \
     add_quantity
 
+from image_array import \
+    ImageArray
+
 from field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \


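ImageArray is now importable from the top-level data_objects API.  A minimal
sketch of what that enables, assuming ImageArray is an ndarray subclass with
image-output helpers such as write_png (per the rest of this series):

    import numpy as np
    from yt.data_objects.api import ImageArray

    # Wrap a synthetic 64x64 RGBA buffer.
    im = ImageArray(np.random.random((64, 64, 4)))
    im.write_png("random_image.png")  # assumed helper on ImageArray
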
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import math
 import weakref
 import exceptions
@@ -99,11 +99,11 @@
         """
         YTSelectionContainer1D.__init__(self, pf, fields, **kwargs)
         self.positions = positions
-        self.dts = na.empty_like(positions[:,0])
-        self.dts[:-1] = na.sqrt(na.sum((self.positions[1:]-
+        self.dts = np.empty_like(positions[:,0])
+        self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
         self.dts[-1] = self.dts[-2]
-        self.ts = na.add.accumulate(self.dts)
+        self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])
         self._dts, self._ts = {}, {}
@@ -114,27 +114,27 @@
         LE = self.pf.h.grid_left_edge
         RE = self.pf.h.grid_right_edge
         # Check left faces first
-        min_streampoint = na.min(self.positions, axis=0)
-        max_streampoint = na.max(self.positions, axis=0)
-        p = na.all((min_streampoint <= RE) & (max_streampoint > LE), axis=1)
+        min_streampoint = np.min(self.positions, axis=0)
+        max_streampoint = np.max(self.positions, axis=0)
+        p = np.all((min_streampoint <= RE) & (max_streampoint > LE), axis=1)
         self._grids = self.hierarchy.grids[p]
 
     #@restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = na.logical_and(self._get_cut_mask(grid),
+        mask = np.logical_and(self._get_cut_mask(grid),
                               grid.child_mask)
         if field == 'dts': return self._dts[grid.id][mask]
         if field == 't': return self._ts[grid.id][mask]
         return grid[field][mask]
 
     def _get_cut_mask(self, grid):
-        mask = na.zeros(grid.ActiveDimensions, dtype='int')
-        dts = na.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = na.zeros(grid.ActiveDimensions, dtype='float64')
+        mask = np.zeros(grid.ActiveDimensions, dtype='int')
+        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
+        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         #pdb.set_trace()
-        points_in_grid = na.all(self.positions > grid.LeftEdge, axis=1) & \
-                         na.all(self.positions <= grid.RightEdge, axis=1)
-        pids = na.where(points_in_grid)[0]
+        points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
+                         np.all(self.positions <= grid.RightEdge, axis=1)
+        pids = np.where(points_in_grid)[0]
         for i, pos in zip(pids, self.positions[points_in_grid]):
             if not points_in_grid[i]: continue
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
@@ -223,10 +223,10 @@
         YTSelectionContainer2D.__init__(self, axis, pf, field_parameters)
         self.proj_style = style
         if style == "mip":
-            self.func = na.max
+            self.func = np.max
             op = "max"
         elif style == "integrate":
-            self.func = na.sum # for the future
+            self.func = np.sum # for the future
             op = "sum"
         else:
             raise NotImplementedError(style)
@@ -248,13 +248,13 @@
     def _get_tree(self, nvals):
         xd = self.pf.domain_dimensions[x_dict[self.axis]]
         yd = self.pf.domain_dimensions[y_dict[self.axis]]
-        return QuadTree(na.array([xd,yd], dtype='int64'), nvals,
+        return QuadTree(np.array([xd,yd], dtype='int64'), nvals,
                         style = self.proj_style)
 
     def _get_conv(self, fields):
         # Place holder for a time when maybe we will not be doing just
         # a single dx for every field.
-        convs = na.empty(len(fields), dtype="float64")
+        convs = np.empty(len(fields), dtype="float64")
         fields = self._determine_fields(fields)
         for i, field in enumerate(fields):
             fi = self._get_field_info(*field)
@@ -292,16 +292,16 @@
         px, py, pdx, pdy, nvals, nwvals = tree.get_all(False)
         nvals = self.comm.mpi_allreduce(nvals, op=op)
         nwvals = self.comm.mpi_allreduce(nwvals, op=op)
-        na.multiply(px, self.pf.domain_width[x_dict[self.axis]], px)
-        na.add(px, ox, px)
-        na.multiply(pdx, self.pf.domain_width[x_dict[self.axis]], pdx)
+        np.multiply(px, self.pf.domain_width[x_dict[self.axis]], px)
+        np.add(px, ox, px)
+        np.multiply(pdx, self.pf.domain_width[x_dict[self.axis]], pdx)
 
-        na.multiply(py, self.pf.domain_width[y_dict[self.axis]], py)
-        na.add(py, oy, py)
-        na.multiply(pdy, self.pf.domain_width[y_dict[self.axis]], pdy)
+        np.multiply(py, self.pf.domain_width[y_dict[self.axis]], py)
+        np.add(py, oy, py)
+        np.multiply(pdy, self.pf.domain_width[y_dict[self.axis]], pdy)
 
         if self.weight_field is not None:
-            na.divide(nvals, nwvals[:,None], nvals)
+            np.divide(nvals, nwvals[:,None], nvals)
         if self.weight_field is None:
             convs = self._get_conv(fields)
             nvals *= convs[None,:]
@@ -315,7 +315,7 @@
         data['fields'] = nvals
         # Now we run the finalizer, which is ignored if we don't need it
         fd = data['fields']
-        field_data = na.hsplit(data.pop('fields'), len(fields))
+        field_data = np.hsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             mylog.debug("Setting field %s", field)
             self[field] = field_data[fi].ravel()
@@ -334,21 +334,33 @@
             dl = 1.0
         else:
             dl = chunk.fwidth[:, self.axis]
-        v = na.empty((chunk.size, len(fields)), dtype="float64")
+        v = np.empty((chunk.size, len(fields)), dtype="float64")
         for i in range(len(fields)):
             v[:,i] = chunk[fields[i]] * dl
         if self.weight_field is not None:
             w = chunk[self.weight_field]
-            na.multiply(v, w[:,None], v)
-            na.multiply(w, dl, w)
+            np.multiply(v, w[:,None], v)
+            np.multiply(w, dl, w)
         else:
-            w = na.ones(chunk.size, dtype="float64")
+            w = np.ones(chunk.size, dtype="float64")
         icoords = chunk.icoords
         i1 = icoords[:,0]
         i2 = icoords[:,1]
         ilevel = chunk.ires
         tree.add_chunk_to_tree(i1, i2, ilevel, v, w)
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+               origin='center-window'):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism of creating a plot window from this
+        object, which can then be moved around, zoomed, and on and on.  All
+        behavior of the plot window is relegated to that routine.
+        """
+        pw = self._get_pw(fields, center, width, origin, axes_unit, 'Projection')
+        return pw
+
 class YTOverlapProjBase(YTSelectionContainer2D):
     _top_node = "/Projections"
     _key_fields = YTSelectionContainer2D._key_fields + ['weight_field']
@@ -432,7 +444,7 @@
         self._max_level = max_level
         self._weight = weight_field
         self.preload_style = preload_style
-        self.func = na.sum # for the future
+        self.func = np.sum # for the future
         self.__retval_coords = {}
         self.__retval_fields = {}
         self.__retval_coarse = {}
@@ -490,7 +502,7 @@
             if field is None: continue
             dls.append(just_one(grid['d%s' % axis_names[self.axis]]))
             convs.append(self.pf.units[self.pf.field_info[field].projection_conversion])
-        return na.array(dls), na.array(convs)
+        return np.array(dls), np.array(convs)
 
     def __project_level(self, level, fields):
         grids_to_project = self.source.select_grids(level)
@@ -519,12 +531,12 @@
             field_data.append([pi[fine] for pi in self.__retval_fields[grid.id]])
             self.__retval_coords[grid.id] = [pi[coarse] for pi in self.__retval_coords[grid.id]]
             self.__retval_fields[grid.id] = [pi[coarse] for pi in self.__retval_fields[grid.id]]
-        coord_data = na.concatenate(coord_data, axis=1)
-        field_data = na.concatenate(field_data, axis=1)
+        coord_data = np.concatenate(coord_data, axis=1)
+        field_data = np.concatenate(field_data, axis=1)
         if self._weight is not None:
             field_data = field_data / coord_data[3,:].reshape((1,coord_data.shape[1]))
         else:
-            field_data *= convs[...,na.newaxis]
+            field_data *= convs[...,np.newaxis]
         mylog.info("Level %s done: %s final", \
                    level, coord_data.shape[1])
         pdx = grids_to_project[0].dds[x_dict[self.axis]] # this is our dl
@@ -549,7 +561,7 @@
                 args += self.__retval_coords[grid2.id] + [self.__retval_fields[grid2.id]]
                 args += self.__retval_coords[grid1.id] + [self.__retval_fields[grid1.id]]
                 args.append(1) # Refinement factor
-                args.append(na.ones(args[0].shape, dtype='int64'))
+                args.append(np.ones(args[0].shape, dtype='int64'))
                 kk = CombineGrids(*args)
                 goodI = args[-1].astype('bool')
                 self.__retval_coords[grid2.id] = \
@@ -576,8 +588,8 @@
                     # that this complicated rounding is because sometimes
                     # epsilon differences in dds between the grids causes this
                     # to round to up or down from the expected value.
-                    args.append(int(na.rint(grid2.dds / grid1.dds)[0]))
-                    args.append(na.ones(args[0].shape, dtype='int64'))
+                    args.append(int(np.rint(grid2.dds / grid1.dds)[0]))
+                    args.append(np.ones(args[0].shape, dtype='int64'))
                     kk = CombineGrids(*args)
                     goodI = args[-1].astype('bool')
                     self.__retval_coords[grid2.id] = \
@@ -618,8 +630,8 @@
                 self.__project_level(level, fields)
             coord_data.append(my_coords)
             field_data.append(my_fields)
-            pdxs.append(my_pdx * na.ones(my_coords.shape[1], dtype='float64'))
-            pdys.append(my_pdx * na.ones(my_coords.shape[1], dtype='float64'))
+            pdxs.append(my_pdx * np.ones(my_coords.shape[1], dtype='float64'))
+            pdys.append(my_pdx * np.ones(my_coords.shape[1], dtype='float64'))
             if self._check_region and False:
                 check=self.__cleanup_level(level - 1)
                 if len(check) > 0: all_data.append(check)
@@ -630,10 +642,10 @@
                 del self.__overlap_masks[grid.id]
             mylog.debug("End of projecting level level %s, memory usage %0.3e",
                         level, get_memory_usage()/1024.)
-        coord_data = na.concatenate(coord_data, axis=1)
-        field_data = na.concatenate(field_data, axis=1)
-        pdxs = na.concatenate(pdxs, axis=1)
-        pdys = na.concatenate(pdys, axis=1)
+        coord_data = np.concatenate(coord_data, axis=1)
+        field_data = np.concatenate(field_data, axis=1)
+        pdxs = np.concatenate(pdxs, axis=1)
+        pdys = np.concatenate(pdys, axis=1)
         # We now convert to half-widths and center-points
         data = {}
         data['pdx'] = pdxs; del pdxs
@@ -649,7 +661,7 @@
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
         data = self.comm.par_combine_object(data, datatype='dict', op='cat')
-        field_data = na.vsplit(data.pop('fields'), len(fields))
+        field_data = np.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()
             if self.serialize: self._store_fields(field, self._node_name)
@@ -665,7 +677,7 @@
         # in _get_data_from_grid *and* we attempt not to load weight data
         # independently of the standard field data.
         if self._weight is None:
-            weight_data = na.ones(grid.ActiveDimensions, dtype='float64')
+            weight_data = np.ones(grid.ActiveDimensions, dtype='float64')
             if zero_out: weight_data[grid.child_indices] = 0
             masked_data = [fd.astype('float64') * weight_data
                            for fd in self._get_data_from_grid(grid, fields)]
@@ -683,18 +695,18 @@
         weight_proj = self.func(weight_data, axis=self.axis)
         if (self._check_region and not self.source._is_fully_enclosed(grid)) or self._field_cuts is not None:
             used_data = self._get_points_in_region(grid).astype('bool')
-            used_points = na.where(na.logical_or.reduce(used_data, self.axis))
+            used_points = np.where(np.logical_or.reduce(used_data, self.axis))
         else:
-            used_data = na.array([1.0], dtype='bool')
+            used_data = np.array([1.0], dtype='bool')
             used_points = slice(None)
         if zero_out:
-            subgrid_mask = na.logical_and.reduce(
-                                na.logical_or(grid.child_mask,
+            subgrid_mask = np.logical_and.reduce(
+                                np.logical_or(grid.child_mask,
                                              ~used_data),
                                 self.axis).astype('int64')
         else:
-            subgrid_mask = na.ones(full_proj[0].shape, dtype='int64')
-        xind, yind = [arr[used_points].ravel() for arr in na.indices(full_proj[0].shape)]
+            subgrid_mask = np.ones(full_proj[0].shape, dtype='int64')
+        xind, yind = [arr[used_points].ravel() for arr in np.indices(full_proj[0].shape)]
         start_index = grid.get_global_startindex()
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
@@ -705,7 +717,7 @@
 
     def _get_points_in_region(self, grid):
         pointI = self.source._get_point_indices(grid, use_child_mask=False)
-        point_mask = na.zeros(grid.ActiveDimensions)
+        point_mask = np.zeros(grid.ActiveDimensions)
         point_mask[pointI] = 1.0
         if self._field_cuts is not None:
             for cut in self._field_cuts:
@@ -728,7 +740,7 @@
 class YTCoveringGridBase(YTSelectionContainer3D):
     _spatial = True
     _type_name = "covering_grid"
-    _con_args = ('level', 'left_edge', 'right_edge', 'ActiveDimensions')
+    _con_args = ('level', 'left_edge', 'ActiveDimensions')
     def __init__(self, level, left_edge, dims, fields = None,
                  pf = None, num_ghost_zones = 0, use_pbar = True, **kwargs):
         """A 3D region with all data extracted to a single, specified
@@ -752,23 +764,24 @@
 
         """
         YTSelectionContainer3D.__init__(self, center=kwargs.pop("center", None),
-                           fields=fields, pf=pf, **kwargs)
-        self.left_edge = na.array(left_edge)
+                           pf=pf, **kwargs)
+        self.left_edge = np.array(left_edge)
         self.level = level
-        self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
-        self.ActiveDimensions = na.array(dims,dtype='int32')
+        rdx = self.pf.domain_dimensions*self.pf.refine_by**level
+        self.dds = self.pf.domain_width/rdx.astype("float64")
+        self.ActiveDimensions = np.array(dims, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones
         self._use_pbar = use_pbar
-        self.global_startindex = na.rint((self.left_edge-self.pf.domain_left_edge)/self.dds).astype('int64')
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.global_startindex = np.rint((self.left_edge-self.pf.domain_left_edge)/self.dds).astype('int64')
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/self.dds).astype('int64')
-        self._refresh_data()
+        self.get_data(fields)
 
     def _get_list_of_grids(self, buffer = 0.0):
         if self._grids is not None: return
-        if na.any(self.left_edge - buffer < self.pf.domain_left_edge) or \
-           na.any(self.right_edge + buffer > self.pf.domain_right_edge):
+        if np.any(self.left_edge - buffer < self.pf.domain_left_edge) or \
+           np.any(self.right_edge + buffer > self.pf.domain_right_edge):
             grids,ind = self.pf.hierarchy.get_periodic_box_grids_below_level(
                             self.left_edge - buffer,
                             self.right_edge + buffer, self.level)
@@ -776,14 +789,14 @@
             grids,ind = self.pf.hierarchy.get_box_grids_below_level(
                 self.left_edge - buffer,
                 self.right_edge + buffer, self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind])
+        sort_ind = np.argsort(self.pf.h.grid_levels.ravel()[ind])
         self._grids = self.pf.hierarchy.grids[ind][(sort_ind,)][::-1]
 
     def _refresh_data(self):
         YTSelectionContainer3D._refresh_data(self)
-        self['dx'] = self.dds[0] * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dy'] = self.dds[1] * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dz'] = self.dds[2] * na.ones(self.ActiveDimensions, dtype='float64')
+        self['dx'] = self.dds[0] * np.ones(self.ActiveDimensions, dtype='float64')
+        self['dy'] = self.dds[1] * np.ones(self.ActiveDimensions, dtype='float64')
+        self['dz'] = self.dds[2] * np.ones(self.ActiveDimensions, dtype='float64')
 
     def get_data(self, fields):
         if self._grids is None:
@@ -800,7 +813,7 @@
                 except NeedsOriginalGrid, ngt_exception:
                     pass
             obtain_fields.append(field)
-            self[field] = na.zeros(self.ActiveDimensions, dtype='float64') -999
+            self[field] = np.zeros(self.ActiveDimensions, dtype='float64') -999
         if len(obtain_fields) == 0: return
         mylog.debug("Getting fields %s from %s possible grids",
                    obtain_fields, len(self._grids))
@@ -812,9 +825,9 @@
             count -= self._get_data_from_grid(grid, obtain_fields)
             if count <= 0: break
         if self._use_pbar: pbar.finish()
-        if count > 0 or na.any(self[obtain_fields[0]] == -999):
+        if count > 0 or np.any(self[obtain_fields[0]] == -999):
             # and self.dx < self.hierarchy.grids[0].dx:
-            n_bad = na.where(self[obtain_fields[0]]==-999)[0].size
+            n_bad = np.where(self[obtain_fields[0]]==-999)[0].size
             mylog.error("Covering problem: %s cells are uncovered", n_bad)
             raise KeyError(n_bad)
 
@@ -855,7 +868,7 @@
         g_fields = []
         for field in fields:
             if not grid.has_key(field): grid[field] = \
-               na.zeros(grid.ActiveDimensions, dtype=self[field].dtype)
+               np.zeros(grid.ActiveDimensions, dtype=self[field].dtype)
             g_fields.append(grid[field])
         c_fields = [self[field] for field in fields]
         FillRegion(ref_ratio,
@@ -949,7 +962,7 @@
         if self.level > 0:
             for field in fields_to_get:
                 self[field] = self[field][1:-1,1:-1,1:-1]
-                if na.any(self[field] == -999):
+                if np.any(self[field] == -999):
                     # and self.dx < self.hierarchy.grids[0].dx:
                     n_bad = (self[field]==-999).sum()
                     mylog.error("Covering problem: %s cells are uncovered", n_bad)
@@ -963,35 +976,35 @@
         self.field_data['cdz'] = dx[2]
         LL = self.left_edge - self.pf.domain_left_edge
         self._old_global_startindex = self.global_startindex
-        self.global_startindex = na.rint(LL / dx).astype('int64') - 1
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.global_startindex = np.rint(LL / dx).astype('int64') - 1
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/dx).astype('int64')
         if level == 0 and self.level > 0:
             # We use one grid cell at LEAST, plus one buffer on all sides
-            idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64') + 2
+            idims = np.rint((self.right_edge-self.left_edge)/dx).astype('int64') + 2
             fields = ensure_list(fields)
             for field in fields:
-                self.field_data[field] = na.zeros(idims,dtype='float64')-999
+                self.field_data[field] = np.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
         elif level == 0 and self.level == 0:
             DLE = self.pf.domain_left_edge
-            self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
-            idims = na.rint((self.ActiveDimensions*self.dds)/dx).astype('int64')
+            self.global_startindex = np.array(np.floor(LL/ dx), dtype='int64')
+            idims = np.rint((self.ActiveDimensions*self.dds)/dx).astype('int64')
             fields = ensure_list(fields)
             for field in fields:
-                self.field_data[field] = na.zeros(idims,dtype='float64')-999
+                self.field_data[field] = np.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
 
     def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
         input_left = (self._old_global_startindex + 0.5) * rf 
-        dx = na.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
-        output_dims = na.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
+        dx = np.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
+        output_dims = np.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
         self._cur_dims = output_dims
 
         for field in fields:
-            output_field = na.zeros(output_dims, dtype="float64")
+            output_field = np.zeros(output_dims, dtype="float64")
             output_left = self.global_startindex + 0.5
             ghost_zone_interpolate(rf, self[field], input_left,
                                    output_field, output_left)
@@ -1056,30 +1069,30 @@
         >>> print fproj["Density"]
         """
         YTSelectionContainer2D.__init__(self, axis, pf, field_parameters)
-        self.left_edge = na.array(left_edge)
+        self.left_edge = np.array(left_edge)
         self.level = level
         self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
-        self.dims = na.array([dims]*2)
-        self.ActiveDimensions = na.array([dims]*3, dtype='int32')
+        self.dims = np.array([dims]*2)
+        self.ActiveDimensions = np.array([dims]*3, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
-        self.global_startindex = na.rint((self.left_edge - self.pf.domain_left_edge)
+        self.global_startindex = np.rint((self.left_edge - self.pf.domain_left_edge)
                                          /self.dds).astype('int64')
         self._dls = {}
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/self.dds).astype('int64')
         self._refresh_data()
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
-        if na.any(self.left_edge < self.pf.domain_left_edge) or \
-           na.any(self.right_edge > self.pf.domain_right_edge):
+        if np.any(self.left_edge < self.pf.domain_left_edge) or \
+           np.any(self.right_edge > self.pf.domain_right_edge):
             grids,ind = self.pf.hierarchy.get_periodic_box_grids(
                             self.left_edge, self.right_edge)
         else:
             grids,ind = self.pf.hierarchy.get_box_grids(
                             self.left_edge, self.right_edge)
         level_ind = (self.pf.hierarchy.grid_levels.ravel()[ind] <= self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
+        sort_ind = np.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
         self._grids = self.pf.hierarchy.grids[ind][level_ind][(sort_ind,)][::-1]
 
     def _generate_coords(self):
@@ -1087,9 +1100,9 @@
         yax = y_dict[self.axis]
         ci = self.left_edge + self.dds*0.5
         cf = self.left_edge + self.dds*(self.ActiveDimensions-0.5)
-        cx = na.mgrid[ci[xax]:cf[xax]:self.ActiveDimensions[xax]*1j]
-        cy = na.mgrid[ci[yax]:cf[yax]:self.ActiveDimensions[yax]*1j]
-        blank = na.ones( (self.ActiveDimensions[xax],
+        cx = np.mgrid[ci[xax]:cf[xax]:self.ActiveDimensions[xax]*1j]
+        cy = np.mgrid[ci[yax]:cf[yax]:self.ActiveDimensions[yax]*1j]
+        blank = np.ones( (self.ActiveDimensions[xax],
                           self.ActiveDimensions[yax]), dtype='float64')
         self['px'] = cx[None,:] * blank
         self['py'] = cx[:,None] * blank
@@ -1108,7 +1121,7 @@
         if len(fields_to_get) == 0: return
         temp_data = {}
         for field in fields_to_get:
-            self[field] = na.zeros(self.dims, dtype='float64')
+            self[field] = np.zeros(self.dims, dtype='float64')
         dls = self.__setup_dls(fields_to_get)
         for i,grid in enumerate(self._get_grids()):
             mylog.debug("Getting fields from %s", i)

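Note the covering grid change above: dds now comes straight from the domain
width and refinement factor rather than from an existing grid at the requested
level, so it no longer depends on the hierarchy having reached that level.
The arithmetic is just:

    import numpy as np

    domain_dimensions = np.array([32, 32, 32])
    domain_width = np.array([1., 1., 1.])
    refine_by, level = 2, 3

    rdx = domain_dimensions * refine_by**level   # [256 256 256] cells
    dds = domain_width / rdx.astype("float64")   # 1/256 on each axis
    print dds
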

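The new to_pw hook above builds a PWViewerMPL directly from a projection
object; following the docstring in the diff, usage would be roughly:

    from yt.mods import *

    pf = load("DD0010/DD0010")         # illustrative dataset path
    proj = pf.h.proj("Density", 0)     # axis-0 projection of Density
    pw = proj.to_pw(fields="Density", center='c', width=(100.0, 'kpc'))
    pw.save("Density_projection.png")
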
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1,4 +1,4 @@
-"""
+""" 
 The base classes for selecting and returning data.
 
 Author: Matthew Turk <matthewturk at gmail.com>
@@ -30,7 +30,7 @@
 
 data_object_registry = {}
 
-import numpy as na
+import numpy as np
 import weakref
 import shelve
 from contextlib import contextmanager
@@ -56,12 +56,12 @@
         return item
     except AttributeError:
         if item:
-            return na.ones(shape, dtype='bool')
+            return np.ones(shape, dtype='bool')
         else:
-            return na.zeros(shape, dtype='bool')
+            return np.zeros(shape, dtype='bool')
 
 def restore_field_information_state(func):
-    """
+    """ 
     A decorator that takes a function with the API of (self, grid, field)
     and ensures that after the function is called, the field_parameters will
     be returned to normal.
@@ -75,14 +75,14 @@
     return save_state
 
 class YTFieldData(dict):
-    """
+    """ 
     A Container object for field data, instead of just having it be a dict.
     """
     pass
         
 
 class YTDataContainer(object):
-    """
+    """ 
     Generic YTDataContainer container.  By itself, will attempt to
     generate field, read fields (method defined by derived classes)
     and deal with passing back and forth field parameters.
@@ -100,7 +100,7 @@
                 data_object_registry[cls._type_name] = cls
 
     def __init__(self, pf, field_parameters):
-        """
+        """ 
         Typically this is never called directly, but only due to inheritance.
         It associates a :class:`~yt.data_objects.api.StaticOutput` with the class,
         sets its initial set of fields, and the remainder of the arguments
@@ -120,14 +120,15 @@
 
     def _set_default_field_parameters(self):
         self.field_parameters = {}
-        self.set_field_parameter("center",na.zeros(3,dtype='float64'))
-        self.set_field_parameter("bulk_velocity",na.zeros(3,dtype='float64'))
+        self.set_field_parameter("center",np.zeros(3,dtype='float64'))
+        self.set_field_parameter("bulk_velocity",np.zeros(3,dtype='float64'))
+        self.set_field_parameter("normal",np.array([0,0,1],dtype='float64'))
 
     def _set_center(self, center):
         if center is None:
             pass
-        elif isinstance(center, (types.ListType, types.TupleType, na.ndarray)):
-            center = na.array(center)
+        elif isinstance(center, (types.ListType, types.TupleType, np.ndarray)):
+            center = np.array(center)
         elif center in ("c", "center"):
             center = self.pf.domain_center
         elif center == ("max"): # is this dangerous for race conditions?
@@ -135,7 +136,7 @@
         elif center.startswith("max_"):
             center = self.pf.h.find_max(center[4:])[1]
         else:
-            center = na.array(center, dtype='float64')
+            center = np.array(center, dtype='float64')
         self.center = center
         self.set_field_parameter('center', center)
 
@@ -230,7 +231,7 @@
         try:
             self.pf.field_info[fname].check_available(gen_obj)
         except NeedsGridType, ngt_exception:
-            rv = na.empty(self.size, dtype="float64")
+            rv = np.empty(self.size, dtype="float64")
             ind = 0
             ngz = ngt_exception.ghost_zones
             for io_chunk in self.chunks([], "io"):
@@ -262,7 +263,7 @@
             if ngt_exception.ghost_zones != 0:
                 raise NotImplementedError
             size = self._count_particles(ftype)
-            rv = na.empty(size, dtype="float64")
+            rv = np.empty(size, dtype="float64")
             ind = 0
             for io_chunk in self.chunks([], "io"):
                 for i,chunk in enumerate(self.chunks(field, "spatial")):
@@ -311,7 +312,7 @@
         field_order += [field for field in fields if field not in field_order]
         fid = open(filename,"w")
         fid.write("\t".join(["#"] + field_order + ["\n"]))
-        field_data = na.array([self.field_data[field] for field in field_order])
+        field_data = np.array([self.field_data[field] for field in field_order])
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -518,7 +519,7 @@
 
 class YTSelectionContainer2D(YTSelectionContainer):
     _key_fields = ['px','py','pdx','pdy']
-    """
+    """ 
     Class to represent a set of :class:`YTDataContainer` that's 2-D in nature, and
     thus does not have as many actions as the 3-D data types.
     """
@@ -537,6 +538,23 @@
     def _convert_field_name(self, field):
         return field
 
+    def _get_pw(self, fields, center, width, origin, axes_unit, plot_type):
+        axis = self.axis
+        if fields is None:
+            if self.fields is None:
+                raise SyntaxError("The fields keyword argument must be set")
+        else:
+            self.fields = ensure_list(fields)
+        from yt.visualization.plot_window import \
+            GetBoundsAndCenter, PWViewerMPL
+        from yt.visualization.fixed_resolution import FixedResolutionBuffer
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, self.pf)
+        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
+                         plot_type=plot_type)
+        pw.set_axes_unit(axes_unit)
+        return pw
+
+
     def to_frb(self, width, resolution, center=None, height=None):
         r"""This function returns a FixedResolutionBuffer generated from this
         object.
@@ -571,7 +589,7 @@
 
         >>> proj = pf.h.proj("Density", 0)
         >>> frb = proj.to_frb( (100.0, 'kpc'), 1024)
-        >>> write_image(na.log10(frb["Density"]), 'density_100kpc.png')
+        >>> write_image(np.log10(frb["Density"]), 'density_100kpc.png')
         """
         
         if (self.pf.geometry == "cylindrical" and self.axis == 1) or \
@@ -603,26 +621,6 @@
         frb = FixedResolutionBuffer(self, bounds, resolution)
         return frb
 
-    def to_pw(self):
-        r"""Create a :class:`~yt.visualization.plot_window.PlotWindow` from this
-        object.
- 
-        This is a bare-bones mechanism of creating a plot window from this
-        object, which can then be moved around, zoomed, and on and on.  All
-        behavior of the plot window is relegated to that routine.
-        """
-        axis = self.axis
-        center = self.get_field_parameter("center")
-        if center is None:
-            center = (self.pf.domain_right_edge
-                    + self.pf.domain_left_edge)/2.0
-        width = (1.0, 'unitary')
-        from yt.visualization.plot_window import \
-            PWViewerMPL, GetBoundsAndCenter
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, self.pf)
-        pw = PWViewerMPL(self, bounds)
-        return pw
-
 class YTSelectionContainer3D(YTSelectionContainer):
     _key_fields = ['x','y','z','dx','dy','dz']
     """
@@ -738,16 +736,16 @@
                 samples.append(svals)
             verts.append(my_verts)
         pb.finish()
-        verts = na.concatenate(verts).transpose()
+        verts = np.concatenate(verts).transpose()
         verts = self.comm.par_combine_object(verts, op='cat', datatype='array')
         verts = verts.transpose()
         if sample_values is not None:
-            samples = na.concatenate(samples)
+            samples = np.concatenate(samples)
             samples = self.comm.par_combine_object(samples, op='cat',
                                 datatype='array')
         if rescale:
-            mi = na.min(verts, axis=0)
-            ma = na.max(verts, axis=0)
+            mi = np.min(verts, axis=0)
+            ma = np.max(verts, axis=0)
             verts = (verts - mi) / (ma - mi).max()
         if filename is not None and self.comm.rank == 0:
             f = open(filename, "w")
@@ -849,7 +847,7 @@
         mask = self._get_cut_mask(grid) * grid.child_mask
         vals = grid.get_vertex_centered_data(field)
         if fluxing_field is None:
-            ff = na.ones(vals.shape, dtype="float64")
+            ff = np.ones(vals.shape, dtype="float64")
         else:
             ff = grid.get_vertex_centered_data(fluxing_field)
         xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
@@ -866,10 +864,10 @@
         them to be plotted.
         """
         if log_space:
-            cons = na.logspace(na.log10(min_val),na.log10(max_val),
+            cons = np.logspace(np.log10(min_val),np.log10(max_val),
                                num_levels+1)
         else:
-            cons = na.linspace(min_val, max_val, num_levels+1)
+            cons = np.linspace(min_val, max_val, num_levels+1)
         contours = {}
         if cache: cached_fields = defaultdict(lambda: dict())
         else: cached_fields = None
@@ -898,7 +896,7 @@
         """
         for grid in self._grids:
             if default_value != None:
-                grid[field] = na.ones(grid.ActiveDimensions)*default_value
+                grid[field] = np.ones(grid.ActiveDimensions)*default_value
             grid[field][self._get_point_indices(grid)] = value
 
     _particle_handler = None
@@ -1001,36 +999,36 @@
         grid_vals, xi, yi, zi = [], [], [], []
         for grid in self._base_region._grids:
             xit,yit,zit = self._base_region._get_point_indices(grid)
-            grid_vals.append(na.ones(xit.shape, dtype='int') * (grid.id-grid._id_offset))
+            grid_vals.append(np.ones(xit.shape, dtype='int') * (grid.id-grid._id_offset))
             xi.append(xit)
             yi.append(yit)
             zi.append(zit)
-        grid_vals = na.concatenate(grid_vals)[self._base_indices]
-        grid_order = na.argsort(grid_vals)
+        grid_vals = np.concatenate(grid_vals)[self._base_indices]
+        grid_order = np.argsort(grid_vals)
         # Note: grid_vals is still unordered
-        grid_ids = na.unique(grid_vals)
-        xi = na.concatenate(xi)[self._base_indices][grid_order]
-        yi = na.concatenate(yi)[self._base_indices][grid_order]
-        zi = na.concatenate(zi)[self._base_indices][grid_order]
-        bc = na.bincount(grid_vals)
+        grid_ids = np.unique(grid_vals)
+        xi = np.concatenate(xi)[self._base_indices][grid_order]
+        yi = np.concatenate(yi)[self._base_indices][grid_order]
+        zi = np.concatenate(zi)[self._base_indices][grid_order]
+        bc = np.bincount(grid_vals)
         splits = []
         for i,v in enumerate(bc):
             if v > 0: splits.append(v)
-        splits = na.add.accumulate(splits)
-        xis, yis, zis = [na.array_split(aa, splits) for aa in [xi,yi,zi]]
+        splits = np.add.accumulate(splits)
+        xis, yis, zis = [np.array_split(aa, splits) for aa in [xi,yi,zi]]
         self._indices = {}
         h = self._base_region.pf.h
         for grid_id, x, y, z in itertools.izip(grid_ids, xis, yis, zis):
             # grid_id needs no offset
             ll = h.grids[grid_id].ActiveDimensions.prod() \
-               - (na.logical_not(h.grids[grid_id].child_mask)).sum()
+               - (np.logical_not(h.grids[grid_id].child_mask)).sum()
             # This means we're completely enclosed, except for child masks
             if x.size == ll:
                 self._indices[grid_id] = None
             else:
                 # This will slow things down a bit, but conserve memory
                 self._indices[grid_id] = \
-                    na.zeros(h.grids[grid_id].ActiveDimensions, dtype='bool')
+                    np.zeros(h.grids[grid_id].ActiveDimensions, dtype='bool')
                 self._indices[grid_id][(x,y,z)] = True
         self._grids = h.grids[self._indices.keys()]
 
@@ -1042,16 +1040,16 @@
         return False
 
     def _get_cut_mask(self, grid):
-        cm = na.zeros(grid.ActiveDimensions, dtype='bool')
+        cm = np.zeros(grid.ActiveDimensions, dtype='bool')
         cm[self._get_point_indices(grid, False)] = True
         return cm
 
-    __empty_array = na.array([], dtype='bool')
+    __empty_array = np.array([], dtype='bool')
     def _get_point_indices(self, grid, use_child_mask=True):
         # Yeah, if it's not true, we don't care.
         tr = self._indices.get(grid.id-grid._id_offset, self.__empty_array)
-        if tr is None: tr = na.where(grid.child_mask)
-        else: tr = na.where(tr)
+        if tr is None: tr = np.where(grid.child_mask)
+        else: tr = np.where(tr)
         return tr
 
     def __repr__(self):
@@ -1068,7 +1066,7 @@
             grid = self.pf.h.grids[g]
             if g in other._indices and g in self._indices:
                 # We now join the indices
-                ind = na.zeros(grid.ActiveDimensions, dtype='bool')
+                ind = np.zeros(grid.ActiveDimensions, dtype='bool')
                 ind[self._indices[g]] = True
                 ind[other._indices[g]] = True
                 if ind.prod() == grid.ActiveDimensions.prod(): ind = None
@@ -1105,7 +1103,7 @@
         return False
 
     def _get_cut_mask(self, grid):
-        point_mask = na.ones(grid.ActiveDimensions, dtype='bool')
+        point_mask = np.ones(grid.ActiveDimensions, dtype='bool')
         point_mask *= self._base_region._get_cut_mask(grid)
         for cut in self._field_cuts:
             point_mask *= eval(cut)
@@ -1160,7 +1158,7 @@
             self._all_regions.append(item)
             # So cut_masks don't get messed up.
             item._boolean_touched = True
-        self._all_regions = na.unique(self._all_regions)
+        self._all_regions = np.unique(self._all_regions)
 
     def _make_overlaps(self):
         # Using the processed cut_masks, we'll figure out what grids
@@ -1185,7 +1183,7 @@
                 # The whole grid is in the hybrid region if a) its cut_mask
                 # in the original region is identical to the new one and b)
                 # the original region cut_mask is all ones.
-                if (local == na.bitwise_and(overall, local)).all() and \
+                if (local == np.bitwise_and(overall, local)).all() and \
                         (local == True).all():
                     self._all_overlap.append(grid)
                     continue
@@ -1213,7 +1211,7 @@
         return (grid in self._all_overlap)
 
     def _get_list_of_grids(self):
-        self._grids = na.array(self._some_overlap + self._all_overlap,
+        self._grids = np.array(self._some_overlap + self._all_overlap,
             dtype='object')
 
     def _get_cut_mask(self, grid, field=None):
@@ -1270,13 +1268,13 @@
             if i == 0: continue
             if item == "AND":
                 # So, the next item in level_masks we want to AND.
-                na.bitwise_and(this_cut_mask, level_masks[i+1], this_cut_mask)
+                np.bitwise_and(this_cut_mask, level_masks[i+1], this_cut_mask)
             if item == "NOT":
                 # It's convenient to remember that NOT == AND NOT
-                na.bitwise_and(this_cut_mask, na.invert(level_masks[i+1]),
+                np.bitwise_and(this_cut_mask, np.invert(level_masks[i+1]),
                     this_cut_mask)
             if item == "OR":
-                na.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
+                np.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
         if not isinstance(grid, FakeGridForParticles):
             self._cut_masks[grid.id] = this_cut_mask
         return this_cut_mask
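
A note on the plot-window changes above: the per-object to_pw removed
here is superseded by the new _get_pw helper, which funnels all bounds
and center handling through GetBoundsAndCenter before constructing the
PWViewerMPL.  A minimal sketch of how a caller-facing method could wrap
it (the 'Slice' plot_type string and the keyword defaults below are
illustrative assumptions, not taken from this changeset):

    # Hypothetical wrapper for a 2-D container; everything substantive
    # is delegated to the new _get_pw helper.
    def to_pw(self, fields=None, center=None, width=None,
              origin='center-window', axes_unit=None):
        return self._get_pw(fields, center, width, origin,
                            axes_unit, 'Slice')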


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -67,14 +67,14 @@
             rv = self.func(ds, *args, **kwargs)
             if not iterable(rv): rv = (rv,)
             for i in range(self.n_ret): retvals[i].append(rv[i])
-        retvals = [na.array(retvals[i]) for i in range(self.n_ret)]
+        retvals = [np.array(retvals[i]) for i in range(self.n_ret)]
         # Note that we do some fancy footwork here.
         # _par_combine_object and its affiliated alltoall function
         # assume that the *long* axis is the last one.  However,
         # our long axis is the first one!
         rv = []
         for my_list in retvals:
-            data = na.array(my_list).transpose()
+            data = np.array(my_list).transpose()
             rv.append(self.comm.par_combine_object(data,
                         datatype="array", op="cat").transpose())
         retvals = rv
@@ -146,7 +146,7 @@
 
     return x,y,z, den
 def _combCenterOfMass(data, x,y,z, den):
-    return na.array([x.sum(), y.sum(), z.sum()])/den.sum()
+    return np.array([x.sum(), y.sum(), z.sum()])/den.sum()
 add_quantity("CenterOfMass", function=_CenterOfMass,
              combine_function=_combCenterOfMass, n_ret = 4)
 
@@ -179,7 +179,7 @@
     xv = xv.sum()/w
     yv = yv.sum()/w
     zv = zv.sum()/w
-    return na.array([xv, yv, zv])
+    return np.array([xv, yv, zv])
 add_quantity("BulkVelocity", function=_BulkVelocity,
              combine_function=_combBulkVelocity, n_ret=4)
 
@@ -210,9 +210,9 @@
     return [j_mag]
 
 def _combAngularMomentumVector(data, j_mag):
-    if len(j_mag.shape) < 2: j_mag = na.expand_dims(j_mag, 0)
+    if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
     L_vec = j_mag.sum(axis=0)
-    L_vec_norm = L_vec / na.sqrt((L_vec**2.0).sum())
+    L_vec_norm = L_vec / np.sqrt((L_vec**2.0).sum())
     return L_vec_norm
 add_quantity("AngularMomentumVector", function=_AngularMomentumVector,
              combine_function=_combAngularMomentumVector, n_ret=1)
@@ -229,17 +229,17 @@
     amx = data["SpecificAngularMomentumX"]*data["CellMassMsun"]
     amy = data["SpecificAngularMomentumY"]*data["CellMassMsun"]
     amz = data["SpecificAngularMomentumZ"]*data["CellMassMsun"]
-    j_mag = na.array([amx.sum(), amy.sum(), amz.sum()])
-    e_term_pre = na.sum(data["CellMassMsun"]*data["VelocityMagnitude"]**2.0)
+    j_mag = np.array([amx.sum(), amy.sum(), amz.sum()])
+    e_term_pre = np.sum(data["CellMassMsun"]*data["VelocityMagnitude"]**2.0)
     weight=data["CellMassMsun"].sum()
     return j_mag, m_enc, e_term_pre, weight
 def _combBaryonSpinParameter(data, j_mag, m_enc, e_term_pre, weight):
     # Because it's a vector field, we have to ensure we have enough dimensions
-    if len(j_mag.shape) < 2: j_mag = na.expand_dims(j_mag, 0)
+    if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
     W = weight.sum()
     M = m_enc.sum()
-    J = na.sqrt(((j_mag.sum(axis=0))**2.0).sum())/W
-    E = na.sqrt(e_term_pre.sum()/W)
+    J = np.sqrt(((j_mag.sum(axis=0))**2.0).sum())/W
+    E = np.sqrt(e_term_pre.sum()/W)
     G = 6.67e-8 # cm^3 g^-1 s^-2
     spin = J * E / (M*1.989e33*G)
     return spin
@@ -253,11 +253,11 @@
     """
     m_enc = data["CellMassMsun"].sum() + data["ParticleMassMsun"].sum()
     amx = data["ParticleSpecificAngularMomentumX"]*data["ParticleMassMsun"]
-    if amx.size == 0: return (na.zeros((3,), dtype='float64'), m_enc, 0, 0)
+    if amx.size == 0: return (np.zeros((3,), dtype='float64'), m_enc, 0, 0)
     amy = data["ParticleSpecificAngularMomentumY"]*data["ParticleMassMsun"]
     amz = data["ParticleSpecificAngularMomentumZ"]*data["ParticleMassMsun"]
-    j_mag = na.array([amx.sum(), amy.sum(), amz.sum()])
-    e_term_pre = na.sum(data["ParticleMassMsun"]
+    j_mag = np.array([amx.sum(), amy.sum(), amz.sum()])
+    e_term_pre = np.sum(data["ParticleMassMsun"]
                        *data["ParticleVelocityMagnitude"]**2.0)
     weight=data["ParticleMassMsun"].sum()
     return j_mag, m_enc, e_term_pre, weight
@@ -321,15 +321,15 @@
         thermal = (data["ThermalEnergy"] * mass_to_use).sum()
         kinetic += thermal
     if periodic_test:
-        kinetic = na.ones_like(kinetic)
+        kinetic = np.ones_like(kinetic)
     # Gravitational potential energy
     # We only divide once here because we have velocity in cgs, but radius is
     # in code.
     G = 6.67e-8 / data.convert("cm") # cm^3 g^-1 s^-2
     # Check for periodicity of the clump.
-    two_root = 2. / na.array(data.pf.domain_dimensions)
+    two_root = 2. / np.array(data.pf.domain_dimensions)
     domain_period = data.pf.domain_right_edge - data.pf.domain_left_edge
-    periodic = na.array([0., 0., 0.])
+    periodic = np.array([0., 0., 0.])
     for i,dim in enumerate(["x", "y", "z"]):
         sorted = data[dim][data[dim].argsort()]
         # If two adjacent values are different by (more than) two root grid
@@ -341,7 +341,7 @@
             # define the gap from the right boundary, which we'll use for the
             # periodic adjustment later.
             sel = (diff >= two_root[i])
-            index = na.min(na.nonzero(sel))
+            index = np.min(np.nonzero(sel))
             # The last addition term below ensures that the data makes a full
             # wrap-around.
             periodic[i] = data.pf.domain_right_edge[i] - sorted[index + 1] + \
@@ -363,26 +363,26 @@
             local_data[dim] += periodic[i]
             local_data[dim] %= domain_period[i]
     if periodic_test:
-        local_data["CellMass"] = na.ones_like(local_data["CellMass"])
+        local_data["CellMass"] = np.ones_like(local_data["CellMass"])
     import time
     t1 = time.time()
     if treecode:
         # Calculate the binding energy using the treecode method.
         # Faster but less accurate.
         # The octree doesn't like uneven root grids, so we will make it cubical.
-        root_dx = 1./na.array(data.pf.domain_dimensions).astype('float64')
-        left = min([na.amin(local_data['x']), na.amin(local_data['y']),
-            na.amin(local_data['z'])])
-        right = max([na.amax(local_data['x']), na.amax(local_data['y']),
-            na.amax(local_data['z'])])
-        cover_min = na.array([left, left, left])
-        cover_max = na.array([right, right, right])
+        root_dx = 1./np.array(data.pf.domain_dimensions).astype('float64')
+        left = min([np.amin(local_data['x']), np.amin(local_data['y']),
+            np.amin(local_data['z'])])
+        right = max([np.amax(local_data['x']), np.amax(local_data['y']),
+            np.amax(local_data['z'])])
+        cover_min = np.array([left, left, left])
+        cover_max = np.array([right, right, right])
         # Fix the coverage to match to root grid cell left 
         # edges for making indexes.
         cover_min = cover_min - cover_min % root_dx
         cover_max = cover_max - cover_max % root_dx
-        cover_imin = (cover_min * na.array(data.pf.domain_dimensions)).astype('int64')
-        cover_imax = (cover_max * na.array(data.pf.domain_dimensions) + 1).astype('int64')
+        cover_imin = (cover_min * np.array(data.pf.domain_dimensions)).astype('int64')
+        cover_imax = (cover_max * np.array(data.pf.domain_dimensions) + 1).astype('int64')
         cover_ActiveDimensions = cover_imax - cover_imin
         # Create the octree with these dimensions.
         # One value (mass) with incremental=True.
@@ -390,12 +390,12 @@
         #print 'here', cover_ActiveDimensions
         # Now discover what levels this data comes from, not assuming
         # symmetry.
-        dxes = na.unique(data['dx']) # unique returns a sorted array,
-        dyes = na.unique(data['dy']) # so these will all have the same
-        dzes = na.unique(data['dz']) # order.
+        dxes = np.unique(data['dx']) # unique returns a sorted array,
+        dyes = np.unique(data['dy']) # so these will all have the same
+        dzes = np.unique(data['dz']) # order.
         # We only need one dim to figure out levels, we'll use x.
         dx = 1./data.pf.domain_dimensions[0]
-        levels = (na.log(dx / dxes) / na.log(data.pf.refine_by)).astype('int')
+        levels = (np.log(dx / dxes) / np.log(data.pf.refine_by)).astype('int')
         lsort = levels.argsort()
         levels = levels[lsort]
         dxes = dxes[lsort]
@@ -408,9 +408,9 @@
             thisx = (local_data["x"][sel] / dx).astype('int64') - cover_imin[0] * 2**L
             thisy = (local_data["y"][sel] / dy).astype('int64') - cover_imin[1] * 2**L
             thisz = (local_data["z"][sel] / dz).astype('int64') - cover_imin[2] * 2**L
-	    vals = na.array([local_data["CellMass"][sel]], order='F')
+	    vals = np.array([local_data["CellMass"][sel]], order='F')
             octree.add_array_to_tree(L, thisx, thisy, thisz, vals,
-               na.ones_like(thisx).astype('float64'), treecode = 1)
+               np.ones_like(thisx).astype('float64'), treecode = 1)
         # Now we calculate the binding energy using a treecode.
         octree.finalize(treecode = 1)
         mylog.info("Using a treecode to find gravitational energy for %d cells." % local_data['x'].size)
@@ -445,7 +445,7 @@
     m = (data['CellMass'] * mass_scale_factor).astype('float32')
     assert(m.size > bsize)
 
-    gsize=int(na.ceil(float(m.size)/bsize))
+    gsize=int(np.ceil(float(m.size)/bsize))
     assert(gsize > 16)
 
     # Now the tedious process of rescaling our values...
@@ -453,7 +453,7 @@
     x = ((data['x'] - data['x'].min()) * length_scale_factor).astype('float32')
     y = ((data['y'] - data['y'].min()) * length_scale_factor).astype('float32')
     z = ((data['z'] - data['z'].min()) * length_scale_factor).astype('float32')
-    p = na.zeros(z.shape, dtype='float32')
+    p = np.zeros(z.shape, dtype='float32')
     
     x_gpu = cuda.mem_alloc(x.size * x.dtype.itemsize)
     y_gpu = cuda.mem_alloc(y.size * y.dtype.itemsize)
@@ -530,7 +530,7 @@
          block=(bsize,1,1), grid=(gsize, gsize), time_kernel=True)
     cuda.memcpy_dtoh(p, p_gpu)
     p1 = p.sum()
-    if na.any(na.isnan(p)): raise ValueError
+    if np.any(np.isnan(p)): raise ValueError
     return p1 * (length_scale_factor / (mass_scale_factor**2.0))
 
 def _Extrema(data, fields, non_zero = False, filter=None):
@@ -574,9 +574,9 @@
                 maxs.append(-1e90)
     return len(fields), mins, maxs
 def _combExtrema(data, n_fields, mins, maxs):
-    mins, maxs = na.atleast_2d(mins, maxs)
+    mins, maxs = np.atleast_2d(mins, maxs)
     n_fields = mins.shape[1]
-    return [(na.min(mins[:,i]), na.max(maxs[:,i])) for i in range(n_fields)]
+    return [(np.min(mins[:,i]), np.max(maxs[:,i])) for i in range(n_fields)]
 add_quantity("Extrema", function=_Extrema, combine_function=_combExtrema,
              n_ret=3)
 
@@ -605,13 +605,13 @@
     """
     ma, maxi, mx, my, mz = -1e90, -1, -1, -1, -1
     if data[field].size > 0:
-        maxi = na.argmax(data[field])
+        maxi = np.argmax(data[field])
         ma = data[field][maxi]
         mx, my, mz = [data[ax][maxi] for ax in 'xyz']
     return (ma, maxi, mx, my, mz)
 def _combMaxLocation(data, *args):
-    args = [na.atleast_1d(arg) for arg in args]
-    i = na.argmax(args[0]) # ma is arg[0]
+    args = [np.atleast_1d(arg) for arg in args]
+    i = np.argmax(args[0]) # ma is arg[0]
     return [arg[i] for arg in args]
 add_quantity("MaxLocation", function=_MaxLocation,
              combine_function=_combMaxLocation, n_ret = 5)
@@ -623,13 +623,13 @@
     """
     ma, mini, mx, my, mz = 1e90, -1, -1, -1, -1
     if data[field].size > 0:
-        mini = na.argmin(data[field])
+        mini = np.argmin(data[field])
         ma = data[field][mini]
         mx, my, mz = [data[ax][mini] for ax in 'xyz']
     return (ma, mini, mx, my, mz)
 def _combMinLocation(data, *args):
-    args = [na.atleast_1d(arg) for arg in args]
-    i = na.argmin(args[0]) # ma is arg[0]
+    args = [np.atleast_1d(arg) for arg in args]
+    i = np.argmin(args[0]) # ma is arg[0]
     return [arg[i] for arg in args]
 add_quantity("MinLocation", function=_MinLocation,
              combine_function=_combMinLocation, n_ret = 5)
@@ -650,8 +650,8 @@
         totals.append(data[field].sum())
     return len(fields), totals
 def _combTotalQuantity(data, n_fields, totals):
-    totals = na.atleast_2d(totals)
+    totals = np.atleast_2d(totals)
     n_fields = totals.shape[1]
-    return [na.sum(totals[:,i]) for i in range(n_fields)]
+    return [np.sum(totals[:,i]) for i in range(n_fields)]
 add_quantity("TotalQuantity", function=_TotalQuantity,
                 combine_function=_combTotalQuantity, n_ret=2)
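
For reference, every quantity registered in this file follows the same
two-part contract: the per-chunk function receives a data chunk and
returns n_ret partial values, and the combine function reduces the
collected partials into the final answer.  A sketch of registering a
new quantity under that contract (the quantity name is hypothetical;
the "CellMassMsun" field follows the examples above):

    import numpy as np

    def _TotalCellMass(data):
        # Runs once per chunk; returns a single partial sum (n_ret=1).
        return data["CellMassMsun"].sum()

    def _combTotalCellMass(data, totals):
        # Receives the collected partials from all chunks and reduces.
        return np.sum(np.atleast_1d(totals))

    add_quantity("TotalCellMass", function=_TotalCellMass,
                 combine_function=_combTotalCellMass, n_ret=1)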


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -30,7 +30,7 @@
 import copy
 import itertools
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -70,22 +70,32 @@
             raise KeyError("No field named %s" % (key,))
         return self.fallback[key]
 
+    name = ""
+
     @classmethod
-    def create_with_fallback(cls, fallback):
+    def create_with_fallback(cls, fallback, name = ""):
         obj = cls()
         obj.fallback = fallback
+        obj.name = name
         return obj
 
     def __contains__(self, key):
         if dict.__contains__(self, key): return True
         if self.fallback is None: return False
-        return self.fallback.has_key(key)
+        return key in self.fallback
 
     def __iter__(self):
-        for f in dict.__iter__(self): yield f
+        for f in dict.__iter__(self):
+            yield f
         if self.fallback:
             for f in self.fallback: yield f
 
+    def keys(self):
+        keys = dict.keys(self)
+        if self.fallback:
+            keys += self.fallback.keys()
+        return keys
+
 def TranslationFunc(field_name):
     def _TranslationFunc(field, data):
         return data[field_name]
@@ -95,6 +105,7 @@
     return
 
 FieldInfo = FieldInfoContainer()
+FieldInfo.name = id(FieldInfo)
 add_field = FieldInfo.add_field
 
 def derived_field(**kwargs):
@@ -150,11 +161,11 @@
         self._spatial = not flat
         self.ActiveDimensions = [nd,nd,nd]
         self.shape = tuple(self.ActiveDimensions)
-        self.size = na.prod(self.ActiveDimensions)
+        self.size = np.prod(self.ActiveDimensions)
         self.LeftEdge = [0.0, 0.0, 0.0]
         self.RightEdge = [1.0, 1.0, 1.0]
-        self.dds = na.ones(3, "float64")
-        self['dx'] = self['dy'] = self['dz'] = na.array([1.0])
+        self.dds = np.ones(3, "float64")
+        self['dx'] = self['dy'] = self['dz'] = np.array([1.0])
         class fake_parameter_file(defaultdict):
             pass
 
@@ -163,8 +174,8 @@
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
                 pf.hubble_constant = pf.cosmological_simulation = 0.0
-            pf.domain_left_edge = na.zeros(3, 'float64')
-            pf.domain_right_edge = na.ones(3, 'float64')
+            pf.domain_left_edge = np.zeros(3, 'float64')
+            pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3
         self.pf = pf
 
@@ -182,12 +193,12 @@
         self.requested_parameters = []
         if not self.flat:
             defaultdict.__init__(self,
-                lambda: na.ones((nd, nd, nd), dtype='float64')
-                + 1e-4*na.random.random((nd, nd, nd)))
+                lambda: np.ones((nd, nd, nd), dtype='float64')
+                + 1e-4*np.random.random((nd, nd, nd)))
         else:
             defaultdict.__init__(self, 
-                lambda: na.ones((nd * nd * nd), dtype='float64')
-                + 1e-4*na.random.random((nd * nd * nd)))
+                lambda: np.ones((nd * nd * nd), dtype='float64')
+                + 1e-4*np.random.random((nd * nd * nd)))
 
     def __missing__(self, item):
         FI = getattr(self.pf, "field_info", FieldInfo)
@@ -217,13 +228,13 @@
         FI = getattr(self.pf, "field_info", FieldInfo)
         if FI.has_key(field_name) and FI[field_name].particle_type:
             self.requested.append(field_name)
-            return na.ones(self.NumberOfParticles)
+            return np.ones(self.NumberOfParticles)
         return defaultdict.__missing__(self, field_name)
 
     def get_field_parameter(self, param):
         self.requested_parameters.append(param)
-        if param in ['bulk_velocity', 'center', 'height_vector']:
-            return na.random.random(3) * 1e-2
+        if param in ['bulk_velocity', 'center', 'normal']:
+            return np.random.random(3) * 1e-2
         else:
             return 0.0
     _num_ghost_zones = 0
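
The keys(), __contains__, and __iter__ methods added above all chain
through self.fallback, so a container can shadow a parent while still
exposing the parent's fields.  A standalone approximation of that
lookup pattern (a sketch, not the yt class itself):

    class FallbackDict(dict):
        # Lookups that miss here fall through to a parent container.
        fallback = None
        name = ""

        @classmethod
        def create_with_fallback(cls, fallback, name=""):
            obj = cls()
            obj.fallback = fallback
            obj.name = name
            return obj

        def __contains__(self, key):
            if dict.__contains__(self, key):
                return True
            return self.fallback is not None and key in self.fallback

    base = FallbackDict(Density=1.0)
    child = FallbackDict.create_with_fallback(base, name="child")
    assert "Density" in child   # resolved through the fallback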


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -27,7 +27,7 @@
 import pdb
 import weakref
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.definitions import x_dict, y_dict
@@ -83,11 +83,11 @@
         if self.Parent == None:
             left = self.LeftEdge - self.pf.domain_left_edge
             start_index = left / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
 
         pdx = self.Parent.dds
         start_index = (self.Parent.get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent.LeftEdge) / pdx)
+                       np.rint((self.LeftEdge - self.Parent.LeftEdge) / pdx)
         self.start_index = (start_index * self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -188,15 +188,15 @@
                 if self.pf.field_info[field].particle_type and \
                    self.NumberOfParticles == 0:
                     # because this gets upcast to float
-                    self[field] = na.array([],dtype='int64')
+                    self[field] = np.array([],dtype='int64')
                     return self.field_data[field]
                 try:
                     temp = self.hierarchy.io.pop(self, field)
-                    self[field] = na.multiply(temp, conv_factor, temp)
+                    self[field] = np.multiply(temp, conv_factor, temp)
                 except self.hierarchy.io._read_exception, exc:
                     if field in self.pf.field_info:
                         if self.pf.field_info[field].not_in_all:
-                            self[field] = na.zeros(self.ActiveDimensions, dtype='float64')
+                            self[field] = np.zeros(self.ActiveDimensions, dtype='float64')
                         else:
                             raise
                     else: raise
@@ -213,14 +213,14 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE - LE) / self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+            self.dds = np.array((RE - LE) / self.ActiveDimensions)
+        if self.pf.dimensionality < 2: self.dds[1] = self.pf.domain_right_edge[1] - self.pf.domain_left_edge[1]
+        if self.pf.dimensionality < 3: self.dds[2] = self.pf.domain_right_edge[2] - self.pf.domain_left_edge[2]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property
     def _corners(self):
-        return na.array([ # Unroll!
+        return np.array([ # Unroll!
             [self.LeftEdge[0],  self.LeftEdge[1],  self.LeftEdge[2]],
             [self.RightEdge[0], self.LeftEdge[1],  self.LeftEdge[2]],
             [self.RightEdge[0], self.RightEdge[1], self.LeftEdge[2]],
@@ -245,9 +245,9 @@
         x = x_dict[axis]
         y = y_dict[axis]
         cond = self.RightEdge[x] >= LE[:,x]
-        cond = na.logical_and(cond, self.LeftEdge[x] <= RE[:,x])
-        cond = na.logical_and(cond, self.RightEdge[y] >= LE[:,y])
-        cond = na.logical_and(cond, self.LeftEdge[y] <= RE[:,y])
+        cond = np.logical_and(cond, self.LeftEdge[x] <= RE[:,x])
+        cond = np.logical_and(cond, self.RightEdge[y] >= LE[:,y])
+        cond = np.logical_and(cond, self.LeftEdge[y] <= RE[:,y])
         return cond
 
     def __repr__(self):
@@ -286,19 +286,19 @@
         self.NumberOfParticles = h.grid_particle_count[my_ind, 0]
 
     def __len__(self):
-        return na.prod(self.ActiveDimensions)
+        return np.prod(self.ActiveDimensions)
 
     def find_max(self, field):
         """ Returns value, index of maximum value of *field* in this grid. """
         coord1d = (self[field] * self.child_mask).argmax()
-        coord = na.unravel_index(coord1d, self[field].shape)
+        coord = np.unravel_index(coord1d, self[field].shape)
         val = self[field][coord]
         return val, coord
 
     def find_min(self, field):
         """ Returns value, index of minimum value of *field* in this grid. """
         coord1d = (self[field] * self.child_mask).argmin()
-        coord = na.unravel_index(coord1d, self[field].shape)
+        coord = np.unravel_index(coord1d, self[field].shape)
         val = self[field][coord]
         return val, coord
 
@@ -374,11 +374,13 @@
         self._child_index_mask = None
 
     #@time_execution
-    def __fill_child_mask(self, child, mask, tofill):
+    def __fill_child_mask(self, child, mask, tofill, dlevel = 1):
         rf = self.pf.refine_by
+        if dlevel != 1:
+            rf = rf**dlevel
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
-        startIndex = na.maximum(0, cgi / rf - gi)
-        endIndex = na.minimum((cgi + child.ActiveDimensions) / rf - gi,
+        startIndex = np.maximum(0, cgi / rf - gi)
+        endIndex = np.minimum((cgi + child.ActiveDimensions) / rf - gi,
                               self.ActiveDimensions)
         endIndex += (startIndex == endIndex)
         mask[startIndex[0]:endIndex[0],
@@ -391,7 +393,7 @@
         thus, where higher resolution data is available).
 
         """
-        self._child_mask = na.ones(self.ActiveDimensions, 'bool')
+        self._child_mask = np.ones(self.ActiveDimensions, 'bool')
         for child in self.Children:
             self.__fill_child_mask(child, self._child_mask, 0)
         if self.OverlappingSiblings is not None:
@@ -406,7 +408,7 @@
         and otherwise has the ID of the grid that resides there.
 
         """
-        self._child_index_mask = na.zeros(self.ActiveDimensions, 'int32') - 1
+        self._child_index_mask = np.zeros(self.ActiveDimensions, 'int32') - 1
         for child in self.Children:
             self.__fill_child_mask(child, self._child_index_mask,
                                    child.id)
@@ -433,8 +435,8 @@
         Creates self.coords, which is of dimensions (3, ActiveDimensions)
 
         """
-        ind = na.indices(self.ActiveDimensions)
-        left_shaped = na.reshape(self.LeftEdge, (3, 1, 1, 1))
+        ind = np.indices(self.ActiveDimensions)
+        left_shaped = np.reshape(self.LeftEdge, (3, 1, 1, 1))
         self['x'], self['y'], self['z'] = (ind + 0.5) * self.dds + left_shaped
 
     child_mask = property(fget=_get_child_mask, fdel=_del_child_mask)
@@ -470,7 +472,7 @@
         return cube
 
     def get_vertex_centered_data(self, field, smoothed=True, no_ghost=False):
-        new_field = na.zeros(self.ActiveDimensions + 1, dtype='float64')
+        new_field = np.zeros(self.ActiveDimensions + 1, dtype='float64')
 
         if no_ghost:
             of = self[field]
@@ -482,9 +484,9 @@
             new_field[1:,:-1,1:] += of
             new_field[1:,1:,:-1] += of
             new_field[1:,1:,1:] += of
-            na.multiply(new_field, 0.125, new_field)
+            np.multiply(new_field, 0.125, new_field)
             if self.pf.field_info[field].take_log:
-                new_field = na.log10(new_field)
+                new_field = np.log10(new_field)
 
             new_field[:,:, -1] = 2.0*new_field[:,:,-2] - new_field[:,:,-3]
             new_field[:,:, 0]  = 2.0*new_field[:,:,1] - new_field[:,:,2]
@@ -494,31 +496,31 @@
             new_field[0,:,:]  = 2.0*new_field[1,:,:] - new_field[2,:,:]
 
             if self.pf.field_info[field].take_log:
-                na.power(10.0, new_field, new_field)
+                np.power(10.0, new_field, new_field)
         else:
             cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
-            na.add(new_field, cg[field][1: ,1: ,1: ], new_field)
-            na.add(new_field, cg[field][:-1,1: ,1: ], new_field)
-            na.add(new_field, cg[field][1: ,:-1,1: ], new_field)
-            na.add(new_field, cg[field][1: ,1: ,:-1], new_field)
-            na.add(new_field, cg[field][:-1,1: ,:-1], new_field)
-            na.add(new_field, cg[field][1: ,:-1,:-1], new_field)
-            na.add(new_field, cg[field][:-1,:-1,1: ], new_field)
-            na.add(new_field, cg[field][:-1,:-1,:-1], new_field)
-            na.multiply(new_field, 0.125, new_field)
+            np.add(new_field, cg[field][1: ,1: ,1: ], new_field)
+            np.add(new_field, cg[field][:-1,1: ,1: ], new_field)
+            np.add(new_field, cg[field][1: ,:-1,1: ], new_field)
+            np.add(new_field, cg[field][1: ,1: ,:-1], new_field)
+            np.add(new_field, cg[field][:-1,1: ,:-1], new_field)
+            np.add(new_field, cg[field][1: ,:-1,:-1], new_field)
+            np.add(new_field, cg[field][:-1,:-1,1: ], new_field)
+            np.add(new_field, cg[field][:-1,:-1,:-1], new_field)
+            np.multiply(new_field, 0.125, new_field)
 
         return new_field
 
     def icoords(self, dobj):
         mask = self.select(dobj.selector)
-        if mask is None: return na.empty((0,3), dtype='int64')
+        if mask is None: return np.empty((0,3), dtype='int64')
         coords = convert_mask_to_indices(mask, mask.sum())
         coords += self.get_global_startindex()[None, :]
         return coords
 
     def fcoords(self, dobj):
         mask = self.select(dobj.selector)
-        if mask is None: return na.empty((0,3), dtype='float64')
+        if mask is None: return np.empty((0,3), dtype='float64')
         coords = convert_mask_to_indices(mask, mask.sum()).astype("float64")
         coords += 0.5
         coords *= self.dds[None, :]
@@ -527,16 +529,16 @@
 
     def fwidth(self, dobj):
         mask = self.select(dobj.selector)
-        if mask is None: return na.empty((0,3), dtype='float64')
-        coords = na.empty((mask.sum(), 3), dtype='float64')
+        if mask is None: return np.empty((0,3), dtype='float64')
+        coords = np.empty((mask.sum(), 3), dtype='float64')
         for axis in range(3):
             coords[:,axis] = self.dds[axis]
         return coords
 
     def ires(self, dobj):
         mask = self.select(dobj.selector)
-        if mask is None: return na.empty(0, dtype='int64')
-        coords = na.empty(mask.sum(), dtype='int64')
+        if mask is None: return np.empty(0, dtype='int64')
+        coords = np.empty(mask.sum(), dtype='int64')
         coords[:] = self.Level
         return coords
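
In the no_ghost branch of get_vertex_centered_data above, each
cell-centered value is accumulated onto its eight surrounding vertices
and the 0.125 factor turns those sums into averages; the boundary
faces are then patched by linear extrapolation.  The interior stencil
in isolation:

    import numpy as np

    cell = np.random.random((4, 4, 4))   # cell-centered data
    vert = np.zeros((5, 5, 5))           # one extra vertex per axis
    for dx in (0, 1):
        for dy in (0, 1):
            for dz in (0, 1):
                # every vertex accumulates its eight neighboring cells
                vert[dx:dx+4, dy:dy+4, dz:dz+4] += cell
    vert *= 0.125
    # Boundary vertices see fewer than eight cells, which is why the
    # real method extrapolates the outermost planes afterwards.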
 


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/data_objects/image_array.py
--- /dev/null
+++ b/yt/data_objects/image_array.py
@@ -0,0 +1,271 @@
+"""
+ImageArray Class
+
+Authors: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+
+Homepage: http://yt-project.org/
+License:
+    Copyright (C) 2012 Samuel Skillman.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+  """
+
+import numpy as np
+import h5py as h5
+from yt.visualization.image_writer import write_bitmap, write_image
+
+class ImageArray(np.ndarray):
+    r"""A custom Numpy ndarray used for images.
+
+    This differs from ndarray in that you can optionally specify an
+    info dictionary which is used later in saving, and can be accessed with
+    ImageArray.info.
+
+    Parameters
+    ----------
+    input_array: array_like
+        A numpy ndarray, or list. 
+
+    Other Parameters
+    ----------------
+    info: dictionary
+        Contains information to be stored with image.
+
+    Returns
+    -------
+    obj: ImageArray object 
+
+    Raises
+    ------
+    None
+
+    See Also
+    --------
+    numpy.ndarray : Inherits
+
+    Notes
+    -----
+
+    References
+    ----------
+
+    Examples
+    --------
+    These are written in doctest format, and should illustrate how to
+    use the function.  Use the variables 'pf' for the parameter file, 'pc' for
+    a plot collection, 'c' for a center, and 'L' for a vector. 
+
+    >>> im = np.zeros([64,128,3])
+    >>> for i in xrange(im.shape[0]):
+    >>>     for k in xrange(im.shape[2]):
+    >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+    >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+    >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+    >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    >>> im_arr = ImageArray(im, info=myinfo)
+    >>> im_arr.save('test_ImageArray')
+
+    Numpy ndarray documentation appended:
+
+    """
+    def __new__(cls, input_array, info=None):
+        # Input array is an already formed ndarray instance
+        # We first cast to be our class type
+        obj = np.asarray(input_array).view(cls)
+        # add the new attribute to the created instance
+        if info is None:
+            info = {}
+        obj.info = info
+        # Finally, we must return the newly created object:
+        return obj
+
+    def __array_finalize__(self, obj):
+        # see InfoArray.__array_finalize__ for comments
+        if obj is None: return
+        self.info = getattr(obj, 'info', None)
+
+    def write_hdf5(self, filename):
+        r"""Writes ImageArray to hdf5 file.
+
+        Parameters
+        ----------
+        filename: string
+            Note: the filename will not be modified.
+       
+        Examples
+        -------- 
+        >>> im = np.zeros([64,128,3])
+        >>> for i in xrange(im.shape[0]):
+        >>>     for k in xrange(im.shape[2]):
+        >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+        >>> im_arr = ImageArray(im, info=myinfo)
+        >>> im_arr.write_hdf5('test_ImageArray.h5')
+
+        """
+        array_name = self.info.get("name","image")
+
+        f = h5.File(filename)
+        if array_name in f.keys():
+            del f[array_name]
+        d = f.create_dataset(array_name, data=self)
+        for k, v in self.info.iteritems():
+            d.attrs.create(k, v)
+        f.close()
+
+    def write_png(self, filename, clip_ratio=None):
+        r"""Writes ImageArray to png file.
+
+        Parameters
+        ----------
+        filename: string
+            Note: '.png' is appended if the filename does not already end in it.
+       
+        Examples
+        --------
+        
+        >>> im = np.zeros([64,128,3])
+        >>> for i in xrange(im.shape[0]):
+        >>>     for k in xrange(im.shape[2]):
+        >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+        >>> im_arr = ImageArray(im, info=myinfo)
+        >>> im_arr.write_png('test_ImageArray.png')
+
+        """
+        if filename[-4:] != '.png': 
+            filename += '.png'
+
+        if clip_ratio is not None:
+            return write_bitmap(self.swapaxes(0, 1), filename,
+                                clip_ratio * self.std())
+        else:
+            return write_bitmap(self.swapaxes(0, 1), filename)
+
+    def write_image(self, filename, color_bounds=None, channel=None,  cmap_name="algae", func=lambda x: x):
+        r"""Writes a single channel of the ImageArray to a png file.
+
+        Parameters
+        ----------
+        filename: string
+            Note: '.png' is appended if the filename does not already end in it.
+       
+        Other Parameters
+        ----------------
+        channel: int, optional
+            Which channel to write out as an image.  Defaults to None,
+            in which case the buffer is passed to write_image without
+            slicing out a single channel.
+        color_bounds : tuple of floats, optional
+            The min and max to scale between.  Outlying values will be clipped.
+        cmap_name : string, optional
+            An acceptable colormap.  See either yt.visualization.color_maps or
+            http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
+        func : function, optional
+            A function to transform the buffer before applying a colormap. 
+
+        Returns
+        -------
+        scaled_image : uint8 image that has been saved
+        
+        Examples
+        --------
+        
+        >>> im = np.zeros([64,128])
+        >>> for i in xrange(im.shape[0]):
+        >>>     im[i,:] = np.linspace(0.,0.3*i, im.shape[1])
+
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+        >>> im_arr = ImageArray(im, info=myinfo)
+        >>> im_arr.write_image('test_ImageArray.png')
+
+        """
+        if filename[-4:] != '.png': 
+            filename += '.png'
+
+        if channel is None:
+            return write_image(self.swapaxes(0,1), filename, 
+                               color_bounds=color_bounds, cmap_name=cmap_name, 
+                               func=func)
+        else:
+            return write_image(self.swapaxes(0,1)[:,:,channel], filename, 
+                               color_bounds=color_bounds, cmap_name=cmap_name, 
+                               func=func)
+
+    def save(self, filename, png=True, hdf5=True):
+        """
+        Saves ImageArray. 
+
+        Arguments:
+          filename: string
+            This should not contain the extension type (.png, .h5, ...)
+
+        Optional Arguments:
+          png: boolean, default True
+            Save to a png
+
+          hdf5: boolean, default True
+            Save to hdf5 file, including info dictionary as attributes.
+
+        """
+        if png:
+            if len(self.shape) > 2:
+                self.write_png("%s.png" % filename)
+            else:
+                self.write_image("%s.png" % filename)
+        if hdf5:
+            self.write_hdf5("%s.h5" % filename)
+
+    __doc__ += np.ndarray.__doc__
+
+if __name__ == "__main__":
+    im = np.zeros([64,128,3])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    im_arr = ImageArray(im, info=myinfo)
+    im_arr.save('test_3d_ImageArray')
+
+    im = np.zeros([64,128])
+    for i in xrange(im.shape[0]):
+        im[i,:] = np.linspace(0.,0.3*i, im.shape[1])
+
+    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    im_arr = ImageArray(im, info=myinfo)
+    im_arr.save('test_2d_ImageArray')
+
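
As the __main__ block above demonstrates, ImageArray behaves as an
ordinary ndarray with an info dictionary attached.  A short usage
sketch, assuming the module lands at yt.data_objects.image_array as
the diff header indicates:

    import numpy as np
    from yt.data_objects.image_array import ImageArray

    im = ImageArray(np.random.random((64, 128, 3)),
                    info={'type': 'rendering'})
    # clip_ratio scales the clip value by the image's standard deviation
    im.write_png('random_render.png', clip_ratio=4.0)
    im.save('random_render')   # writes random_render.png and .h5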


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -86,7 +86,7 @@
         for field in fields:
             f = self.pf.field_info[field]
             to_add = f.get_dependencies(pf = self.pf).requested
-            to_add = list(na.unique(to_add))
+            to_add = list(np.unique(to_add))
             if len(to_add) != 1: raise KeyError
             fields_to_read += to_add
             if f._particle_convert_function is None:
@@ -95,9 +95,9 @@
                 func = f.particle_convert
             func = particle_converter(func)
             conv_factors.append(
-              na.fromiter((func(g) for g in grid_list),
+              np.fromiter((func(g) for g in grid_list),
                           count=len(grid_list), dtype='float64'))
-        conv_factors = na.array(conv_factors).transpose()
+        conv_factors = np.array(conv_factors).transpose()
         self.conv_factors = conv_factors
         rvs = self.pf.h.io._read_particles(
             fields_to_read, rtype, args, grid_list, count_list,
@@ -115,9 +115,9 @@
         ParticleIOHandler.__init__(self, pf, source)
 
     def _get_args(self):
-        DLE = na.array(self.pf.domain_left_edge, dtype='float64') 
-        DRE = na.array(self.pf.domain_right_edge, dtype='float64') 
-        args = (na.array(self.left_edge), na.array(self.right_edge), 
+        DLE = np.array(self.pf.domain_left_edge, dtype='float64') 
+        DRE = np.array(self.pf.domain_right_edge, dtype='float64') 
+        args = (np.array(self.left_edge), np.array(self.right_edge), 
                 int(self.periodic), DLE, DRE)
         return (0, args)
 
@@ -140,9 +140,9 @@
         ParticleIOHandler.__init__(self, pf, source)
 
     def _get_args(self):
-        DLE = na.array(self.pf.domain_left_edge, dtype='float64')
-        DRE = na.array(self.pf.domain_right_edge, dtype='float64')
-        return (1, (na.array(self.center, dtype='float64'), self.radius,
+        DLE = np.array(self.pf.domain_left_edge, dtype='float64')
+        DRE = np.array(self.pf.domain_right_edge, dtype='float64')
+        return (1, (np.array(self.center, dtype='float64'), self.radius,
             1, DLE, DRE))
 
 class ParticleIOHandlerDisk(ParticleIOHandlerImplemented):
@@ -156,8 +156,8 @@
         ParticleIOHandler.__init__(self, pf, source)
     
     def _get_args(self):
-        args = (na.array(self.center, dtype='float64'),
-                na.array(self.normal, dtype='float64'),
+        args = (np.array(self.center, dtype='float64'),
+                np.array(self.normal, dtype='float64'),
                 self.radius, self.height)
         return (2, args)
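
A side note on the conversion-factor loop above: np.fromiter with an
explicit count fills a preallocated float64 array directly from the
generator, avoiding an intermediate list.  In isolation, with stand-in
values:

    import numpy as np

    grid_list = range(5)            # stand-in for a list of grids
    func = lambda g: 2.0 * g        # stand-in particle converter
    conv = np.fromiter((func(g) for g in grid_list),
                       count=len(grid_list), dtype='float64')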
         


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/data_objects/particle_trajectories.py
--- a/yt/data_objects/particle_trajectories.py
+++ b/yt/data_objects/particle_trajectories.py
@@ -25,7 +25,7 @@
 from yt.utilities.lib import sample_field_at_positions
 from yt.funcs import *
 
-import numpy as na
+import numpy as np
 import h5py
 
 class ParticleTrajectoryCollection(object) :
@@ -112,16 +112,16 @@
         for pf in self.pfs :
             dd = pf.h.all_data()
             newtags = dd["particle_index"].astype("int")
-            if not na.all(na.in1d(indices, newtags, assume_unique=True)) :
+            if not np.all(np.in1d(indices, newtags, assume_unique=True)) :
                 print "Not all requested particle ids contained in this file!"
                 raise IndexError
-            mask = na.in1d(newtags, indices, assume_unique=True)
-            sorts = na.argsort(newtags[mask])
+            mask = np.in1d(newtags, indices, assume_unique=True)
+            sorts = np.argsort(newtags[mask])
             self.masks.append(mask)            
             self.sorts.append(sorts)
             self.times.append(pf.current_time)
 
-        self.times = na.array(self.times)
+        self.times = np.array(self.times)
 
         # Set up the derived field list and the particle field list
         # so that if the requested field is a particle field, we'll
@@ -226,7 +226,7 @@
         
         if not self.field_data.has_key(field):
             
-            particles = na.empty((0))
+            particles = np.empty((0))
 
             step = int(0)
                 
@@ -238,13 +238,13 @@
 
                     dd = pf.h.all_data()
                     pfield = dd[field][mask]
-                    particles = na.append(particles, pfield[sort])
+                    particles = np.append(particles, pfield[sort])
 
                 else :
 
                     # This is hard... must loop over grids
 
-                    pfield = na.zeros((self.num_indices))
+                    pfield = np.zeros((self.num_indices))
                     x = self["particle_position_x"][:,step]
                     y = self["particle_position_y"][:,step]
                     z = self["particle_position_z"][:,step]
@@ -258,7 +258,7 @@
                                                             grid.RightEdge,
                                                             x, y, z)
 
-                    particles = na.append(particles, pfield)
+                    particles = np.append(particles, pfield)
 
                 step += 1
                 
@@ -294,9 +294,9 @@
         >>> pl.savefig("orbit")
         """
         
-        mask = na.in1d(self.indices, (index,), assume_unique=True)
+        mask = np.in1d(self.indices, (index,), assume_unique=True)
 
-        if not na.any(mask) :
+        if not np.any(mask) :
             print "The particle index %d is not in the list!" % (index)
             raise IndexError
 
@@ -376,7 +376,7 @@
 
         fields = [field for field in sorted(self.field_data.keys())]
         
-        fid.create_dataset("particle_indices", dtype=na.int32,
+        fid.create_dataset("particle_indices", dtype=np.int32,
                            data=self.indices)
         fid.create_dataset("particle_time", data=self.times)
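
The masking logic earlier in this file pairs np.in1d with an argsort
so that the selected particles come out in a consistent order for
every output, however each file happens to store them.  A small
standalone example:

    import numpy as np

    indices = np.array([3, 7, 11])          # ids being tracked
    newtags = np.array([11, 5, 3, 9, 7])    # ids found in one output
    mask = np.in1d(newtags, indices, assume_unique=True)
    sorts = np.argsort(newtags[mask])
    aligned = newtags[mask][sorts]          # -> array([ 3,  7, 11])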
         


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -26,7 +26,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -119,13 +119,13 @@
         used = self.comm.mpi_allreduce(used, op='sum')
         # When the loop completes the parallel finalizer gets called
         #pbar.finish()
-        ub = na.where(used)
+        ub = np.where(used)
         for field in fields:
             if weight: # Now, at the end, we divide out.
                 data[field][ub] /= weight_data[field][ub]
                 std_data[field][ub] /= weight_data[field][ub]
             self[field] = data[field]
-            #self["%s_std" % field] = na.sqrt(std_data[field])
+            #self["%s_std" % field] = np.sqrt(std_data[field])
         self["UsedBins"] = used
 
         if fractional:
@@ -150,7 +150,7 @@
         data = []
         for field in _field_mapping.get(this_field, (this_field,)):
             data.append(source[field].astype('float64'))
-        return na.concatenate(data, axis=0)
+        return np.concatenate(data, axis=0)
 
     def _fix_pickle(self):
         if isinstance(self._data_source, tuple):
@@ -181,10 +181,10 @@
 
         # Get our bins
         if log_space:
-            func = na.logspace
-            lower_bound, upper_bound = na.log10(lower_bound), na.log10(upper_bound)
+            func = np.logspace
+            lower_bound, upper_bound = np.log10(lower_bound), np.log10(upper_bound)
         else:
-            func = na.linspace
+            func = np.linspace
 
         # These are the bin *edges*
         self._bins = func(lower_bound, upper_bound, n_bins + 1)
@@ -197,7 +197,7 @@
         # and the inverse indices right now.
 
     def _get_empty_field(self):
-        return na.zeros(self[self.bin_field].size, dtype='float64')
+        return np.zeros(self[self.bin_field].size, dtype='float64')
 
     @preserve_source_parameters
     def _bin_field(self, source, field, weight, accumulation,
@@ -206,7 +206,7 @@
         # check_cut is set if source != self._data_source
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -225,7 +225,7 @@
         # weights.  Accumulation likely doesn't work with weighted
         # average fields.
         if accumulation: 
-            binned_field = na.add.accumulate(binned_field)
+            binned_field = np.add.accumulate(binned_field)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -236,7 +236,7 @@
             raise EmptyProfileData()
         # Truncate at boundaries.
         if self.end_collect:
-            mi = na.ones_like(source_data).astype('bool')
+            mi = np.ones_like(source_data).astype('bool')
         else:
             mi = ((source_data > self._bins.min())
                &  (source_data < self._bins.max()))
@@ -244,9 +244,9 @@
         if sd.size == 0:
             raise EmptyProfileData()
         # Stick the bins into our fixed bins, set at initialization
-        bin_indices = na.digitize(sd, self._bins)
+        bin_indices = np.digitize(sd, self._bins)
         if self.end_collect: #limit the range of values to 0 and n_bins-1
-            bin_indices = na.clip(bin_indices, 0, self.n_bins - 1)
+            bin_indices = np.clip(bin_indices, 0, self.n_bins - 1)
         else: #throw away outside values
             bin_indices -= 1
           
@@ -262,7 +262,7 @@
         elif bin_style is 'left': x = x[:-1]
         elif bin_style is 'right': x = x[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
+            if self._x_log: x=np.log10(x)
             x = 0.5*(x[:-1] + x[1:])
             if self._x_log: x=10**x
         else:
@@ -280,11 +280,11 @@
         fields.remove(self.bin_field)
         fid.write("\t".join(["#"] + [self.bin_field] + fields + ["\n"]))
 
-        field_data = na.array(self.choose_bins(bin_style)) 
+        field_data = np.array(self.choose_bins(bin_style)) 
         if bin_style is 'both':
-            field_data = na.append([field_data], na.array([self.field_data[field] for field in fields]), axis=0)
+            field_data = np.append([field_data], np.array([self.field_data[field] for field in fields]), axis=0)
         else: 
-            field_data = na.append([field_data], na.array([self.field_data[field][:-1] for field in fields]), axis=0)
+            field_data = np.append([field_data], np.array([self.field_data[field][:-1] for field in fields]), axis=0)
         
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
@@ -351,24 +351,24 @@
         self.x_n_bins = x_n_bins
         self.y_n_bins = y_n_bins
 
-        func = {True:na.logspace, False:na.linspace}[x_log]
+        func = {True:np.logspace, False:np.linspace}[x_log]
         bounds = fix_bounds(x_lower_bound, x_upper_bound, x_log)
         self._x_bins = func(bounds[0], bounds[1], x_n_bins + 1)
         self[x_bin_field] = self._x_bins
 
-        func = {True:na.logspace, False:na.linspace}[y_log]
+        func = {True:np.logspace, False:np.linspace}[y_log]
         bounds = fix_bounds(y_lower_bound, y_upper_bound, y_log)
         self._y_bins = func(bounds[0], bounds[1], y_n_bins + 1)
         self[y_bin_field] = self._y_bins
 
-        if na.any(na.isnan(self[x_bin_field])) \
-            or na.any(na.isnan(self[y_bin_field])):
+        if np.any(np.isnan(self[x_bin_field])) \
+            or np.any(np.isnan(self[y_bin_field])):
             mylog.error("Your min/max values for x, y have given me a nan.")
             mylog.error("Usually this means you are asking for log, with a zero bound.")
             raise ValueError
 
     def _get_empty_field(self):
-        return na.zeros((self[self.x_bin_field].size,
+        return np.zeros((self[self.x_bin_field].size,
                          self[self.y_bin_field].size), dtype='float64')
 
     @preserve_source_parameters
@@ -376,7 +376,7 @@
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -396,9 +396,9 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -410,9 +410,9 @@
             raise EmptyProfileData()
 
         if self.end_collect:
-            mi = na.arange(source_data_x.size)
+            mi = np.arange(source_data_x.size)
         else:
-            mi = na.where( (source_data_x > self._x_bins.min())
+            mi = np.where( (source_data_x > self._x_bins.min())
                            & (source_data_x < self._x_bins.max())
                            & (source_data_y > self._y_bins.min())
                            & (source_data_y < self._y_bins.max()))
@@ -421,11 +421,11 @@
         if sd_x.size == 0 or sd_y.size == 0:
             raise EmptyProfileData()
 
-        bin_indices_x = na.digitize(sd_x, self._x_bins) - 1
-        bin_indices_y = na.digitize(sd_y, self._y_bins) - 1
+        bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
+        bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
         if self.end_collect:
-            bin_indices_x = na.minimum(na.maximum(1, bin_indices_x), self.x_n_bins) - 1
-            bin_indices_y = na.minimum(na.maximum(1, bin_indices_y), self.y_n_bins) - 1
+            bin_indices_x = np.minimum(np.maximum(1, bin_indices_x), self.x_n_bins) - 1
+            bin_indices_y = np.minimum(np.maximum(1, bin_indices_y), self.y_n_bins) - 1
 
         # Now we set up our inverse bin indices
         return (mi, bin_indices_x, bin_indices_y)
@@ -447,8 +447,8 @@
             x = x[1:]
             y = y[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
-            if self._y_log: y=na.log10(y)
+            if self._x_log: x=np.log10(x)
+            if self._y_log: y=np.log10(y)
             x = 0.5*(x[:-1] + x[1:])
             y = 0.5*(y[:-1] + y[1:])
             if self._x_log: x=10**x
@@ -471,7 +471,7 @@
         fid.write("\t".join(["#"] + [self.x_bin_field, self.y_bin_field]
                           + fields + ["\n"]))
         x,y = self.choose_bins(bin_style)
-        x,y = na.meshgrid(x,y)
+        x,y = np.meshgrid(x,y)
         field_data = [x.ravel(), y.ravel()]
         if bin_style is not 'both':
             field_data += [self.field_data[field][:-1,:-1].ravel() for field in fields
@@ -480,7 +480,7 @@
             field_data += [self.field_data[field].ravel() for field in fields
                            if field not in [self.x_bin_field, self.y_bin_field]]
 
-        field_data = na.array(field_data)
+        field_data = np.array(field_data)
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -519,7 +519,7 @@
         return [self.x_bin_field, self.y_bin_field]
 
 def fix_bounds(upper, lower, logit):
-    if logit: return na.log10(upper), na.log10(lower)
+    if logit: return np.log10(upper), np.log10(lower)
     return upper, lower
 
 class BinnedProfile2DInlineCut(BinnedProfile2D):
@@ -538,7 +538,7 @@
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -556,9 +556,9 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
         return binned_field, weight_field, used_field.astype('bool')
 
         
@@ -593,30 +593,30 @@
         self.y_n_bins = y_n_bins
         self.z_n_bins = z_n_bins
 
-        func = {True:na.logspace, False:na.linspace}[x_log]
+        func = {True:np.logspace, False:np.linspace}[x_log]
         bounds = fix_bounds(x_lower_bound, x_upper_bound, x_log)
         self._x_bins = func(bounds[0], bounds[1], x_n_bins + 1)
         self[x_bin_field] = self._x_bins
 
-        func = {True:na.logspace, False:na.linspace}[y_log]
+        func = {True:np.logspace, False:np.linspace}[y_log]
         bounds = fix_bounds(y_lower_bound, y_upper_bound, y_log)
         self._y_bins = func(bounds[0], bounds[1], y_n_bins + 1)
         self[y_bin_field] = self._y_bins
 
-        func = {True:na.logspace, False:na.linspace}[z_log]
+        func = {True:np.logspace, False:np.linspace}[z_log]
         bounds = fix_bounds(z_lower_bound, z_upper_bound, z_log)
         self._z_bins = func(bounds[0], bounds[1], z_n_bins + 1)
         self[z_bin_field] = self._z_bins
 
-        if na.any(na.isnan(self[x_bin_field])) \
-            or na.any(na.isnan(self[y_bin_field])) \
-            or na.any(na.isnan(self[z_bin_field])):
+        if np.any(np.isnan(self[x_bin_field])) \
+            or np.any(np.isnan(self[y_bin_field])) \
+            or np.any(np.isnan(self[z_bin_field])):
             mylog.error("Your min/max values for x, y or z have given me a nan.")
             mylog.error("Usually this means you are asking for log, with a zero bound.")
             raise ValueError
 
     def _get_empty_field(self):
-        return na.zeros((self[self.x_bin_field].size,
+        return np.zeros((self[self.x_bin_field].size,
                          self[self.y_bin_field].size,
                          self[self.z_bin_field].size), dtype='float64')
 
@@ -624,9 +624,9 @@
     def _bin_field(self, source, field, weight, accumulation,
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
-        weight_data = na.ones(source_data.shape).astype('float64')
+        weight_data = np.ones(source_data.shape).astype('float64')
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape).astype('float64')
+        else: weight_data = np.ones(source_data.shape).astype('float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -646,11 +646,11 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 3")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
             if accumulation[2]:
-                binned_field = na.add.accumulate(binned_field, axis=2)
+                binned_field = np.add.accumulate(binned_field, axis=2)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -662,7 +662,7 @@
         if source_data_x.size == 0:
             raise EmptyProfileData()
         if self.end_collect:
-            mi = na.arange(source_data_x.size)
+            mi = np.arange(source_data_x.size)
         else:
             mi = ( (source_data_x > self._x_bins.min())
                  & (source_data_x < self._x_bins.max())
@@ -676,13 +676,13 @@
         if sd_x.size == 0 or sd_y.size == 0 or sd_z.size == 0:
             raise EmptyProfileData()
 
-        bin_indices_x = na.digitize(sd_x, self._x_bins) - 1
-        bin_indices_y = na.digitize(sd_y, self._y_bins) - 1
-        bin_indices_z = na.digitize(sd_z, self._z_bins) - 1
+        bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
+        bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
+        bin_indices_z = np.digitize(sd_z, self._z_bins) - 1
         if self.end_collect:
-            bin_indices_x = na.minimum(na.maximum(1, bin_indices_x), self.x_n_bins) - 1
-            bin_indices_y = na.minimum(na.maximum(1, bin_indices_y), self.y_n_bins) - 1
-            bin_indices_z = na.minimum(na.maximum(1, bin_indices_z), self.z_n_bins) - 1
+            bin_indices_x = np.minimum(np.maximum(1, bin_indices_x), self.x_n_bins) - 1
+            bin_indices_y = np.minimum(np.maximum(1, bin_indices_y), self.y_n_bins) - 1
+            bin_indices_z = np.minimum(np.maximum(1, bin_indices_z), self.z_n_bins) - 1
 
         # Now we set up our inverse bin indices
         return (mi, bin_indices_x, bin_indices_y, bin_indices_z)
@@ -707,9 +707,9 @@
             y = y[1:]
             z = z[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
-            if self._y_log: y=na.log10(y)
-            if self._z_log: z=na.log10(z)
+            if self._x_log: x=np.log10(x)
+            if self._y_log: y=np.log10(y)
+            if self._z_log: z=np.log10(z)
             x = 0.5*(x[:-1] + x[1:])
             y = 0.5*(y[:-1] + y[1:])
             z = 0.5*(z[:-1] + z[1:])
@@ -788,7 +788,7 @@
             if field in set_attr.values(): continue
             order.append(field)
             values.append(self[field].ravel())
-        values = na.array(values).transpose()
+        values = np.array(values).transpose()
         self._data_source.hierarchy.save_data(values, "/Profiles", name,
                                               set_attr, force=force)
 

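As an aside, the end_collect branch being renamed above leans on np.digitize's convention: underflow is marked with index 0 and overflow with len(bins), so clipping to [0, n_bins - 1] folds out-of-range samples into the first and last bins rather than discarding them. A minimal standalone sketch with made-up values:

import numpy as np

bins = np.linspace(0.0, 1.0, 5)          # five toy bin edges
sd = np.array([-0.5, 0.2, 0.7, 1.5])     # two samples fall outside the edges
idx = np.digitize(sd, bins)              # -> [0, 1, 3, 5]
idx = np.clip(idx, 0, len(bins) - 1)     # -> [0, 1, 3, 4]; the end bins collect outliers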

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -1,4 +1,4 @@
-"""
+""" 
 Data containers based on geometric selection
 
 Author: Matthew Turk <matthewturk at gmail.com>
@@ -28,13 +28,14 @@
 """
 
 import types
-import numpy as na
+import numpy as np
 from exceptions import ValueError, SyntaxError
 
 from yt.funcs import *
 from yt.utilities.lib import \
     VoxelTraversal, planar_points_in_volume, find_grids_in_inclined_box, \
     grid_points_in_volume
+from yt.utilities.lib.alt_ray_tracers import clyindrical_ray_trace
 from yt.utilities.orientation import Orientation
 from .data_containers import \
     YTSelectionContainer1D, YTSelectionContainer2D, YTSelectionContainer3D
@@ -52,8 +53,8 @@
     _key_fields = ['x','y','z','dx','dy','dz']
     _type_name = "ortho_ray"
     _con_args = ('axis', 'coords')
-    def __init__(self, axis, coords, pf=None, field_parameters = None):
-        """
+    def __init__(self, axis, coords, pf=None, field_parameters=None):
+        """ 
         This is an orthogonal ray cast through the entire domain, at a specific
         coordinate.
 
@@ -102,8 +103,8 @@
     _type_name = "ray"
     _con_args = ('start_point', 'end_point')
     sort_by = 't'
-    def __init__(self, start_point, end_point, pf=None, field_parameters = None):
-        """
+    def __init__(self, start_point, end_point, pf=None, field_parameters=None):
+        """ 
         This is an arbitrarily-aligned ray cast through the entire domain, at a
         specific coordinate.
 
@@ -135,32 +136,55 @@
         >>> print ray["Density"], ray["t"], ray["dts"]
         """
         super(YTRayBase, self).__init__(pf, field_parameters)
-        self.start_point = na.array(start_point, dtype='float64')
-        self.end_point = na.array(end_point, dtype='float64')
+        self.start_point = np.array(start_point, dtype='float64')
+        self.end_point = np.array(end_point, dtype='float64')
         self.vec = self.end_point - self.start_point
-        #self.vec /= na.sqrt(na.dot(self.vec, self.vec))
+        #self.vec /= np.sqrt(np.dot(self.vec, self.vec))
         self._set_center(self.start_point)
         self.set_field_parameter('center', self.start_point)
-        self._dts, self._ts = {}, {}
+        self._dts, self._ts, self._masks = {}, {}, {}
 
     def _get_data_from_grid(self, grid, field):
-        mask = na.logical_and(self._get_cut_mask(grid),
-                              grid.child_mask)
-        if field == 'dts': return self._dts[grid.id][mask]
-        if field == 't': return self._ts[grid.id][mask]
+        if self.pf.geometry == "cylindrical":
+            if grid.id in self._masks:
+                mask = self._masks[grid.id] 
+            else:
+                mask = self._get_cut_mask(grid)
+            ts, dts = self._ts[grid.id], self._dts[grid.id]
+        else:
+            mask = np.logical_and(self._get_cut_mask(grid), grid.child_mask)
+            ts, dts = self._ts[grid.id][mask], self._dts[grid.id][mask]
+
+        if field == 'dts':
+            return dts
+        if field == 't': 
+            return ts
+
         gf = grid[field]
         if not iterable(gf):
-            gf = gf * na.ones(grid.child_mask.shape)
+            gf = gf * np.ones(grid.child_mask.shape)
         return gf[mask]
 
     def _get_cut_mask(self, grid):
-        mask = na.zeros(grid.ActiveDimensions, dtype='int')
-        dts = na.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = na.zeros(grid.ActiveDimensions, dtype='float64')
-        VoxelTraversal(mask, ts, dts, grid.LeftEdge, grid.RightEdge,
-                       grid.dds, self.center, self.vec)
-        self._dts[grid.id] = na.abs(dts)
-        self._ts[grid.id] = na.abs(ts)
+        if self.pf.geometry == "cylindrical":
+            _ = clyindrical_ray_trace(self.start_point, self.end_point, 
+                                      grid.LeftEdge, grid.RightEdge)
+            ts, s, rzt, mask = _
+            dts = np.empty(ts.shape, dtype='float64')
+            dts[0], dts[1:] = 0.0, ts[1:] - ts[:-1]
+            grid['r'], grid['z'], grid['theta'] = rzt[:,0], rzt[:,1], rzt[:,2]
+            grid['s'] = s
+        else:
+            mask = np.zeros(grid.ActiveDimensions, dtype='int')
+            dts = np.zeros(grid.ActiveDimensions, dtype='float64')
+            ts = np.zeros(grid.ActiveDimensions, dtype='float64')
+            VoxelTraversal(mask, ts, dts, grid.LeftEdge, grid.RightEdge,
+                           grid.dds, self.center, self.vec)
+            dts = np.abs(dts) 
+            ts = np.abs(ts)
+        self._dts[grid.id] = dts
+        self._ts[grid.id] = ts
+        self._masks[grid.id] = mask
         return mask
 
 
@@ -260,6 +284,18 @@
     def hub_upload(self):
         self._mrep.upload()
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+               origin='center-window'):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism of creating a plot window from this
+        object, which can then be moved around, zoomed, and on and on.  All
+        behavior of the plot window is relegated to that routine.
+        """
+        pw = self._get_pw(fields, center, width, origin, axes_unit, 'Slice')
+        return pw
+
 class YTCuttingPlaneBase(YTSelectionContainer2D):
     _plane = None
     _top_node = "/CuttingPlanes"
@@ -317,14 +353,14 @@
         # ax + by + cz + d = 0
         self.orienter = Orientation(normal, north_vector = north_vector)
         self._norm_vec = self.orienter.normal_vector
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         self._x_vec = self.orienter.unit_vectors[0]
         self._y_vec = self.orienter.unit_vectors[1]
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._norm_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        vecs = np.identity(3)
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
@@ -333,52 +369,6 @@
     def normal(self):
         return self._norm_vec
 
-    def to_frb(self, width, resolution):
-        r"""This function returns an ObliqueFixedResolutionBuffer generated
-        from this object.
-
-        An ObliqueFixedResolutionBuffer is an object that accepts a
-        variable-resolution 2D object and transforms it into an NxM bitmap that
-        can be plotted, examined or processed.  This is a convenience function
-        to return an FRB directly from an existing 2D data object.  Unlike the
-        corresponding to_frb function for other YTSelectionContainer2D objects, this does
-        not accept a 'center' parameter as it is assumed to be centered at the
-        center of the cutting plane.
-
-        Parameters
-        ----------
-        width : width specifier
-            This can either be a floating point value, in the native domain
-            units of the simulation, or a tuple of the (value, unit) style.
-            This will be the width of the FRB.
-        resolution : int or tuple of ints
-            The number of pixels on a side of the final FRB.
-
-        Returns
-        -------
-        frb : :class:`~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`
-            A fixed resolution buffer, which can be queried for fields.
-
-        Examples
-        --------
-
-        >>> v, c = pf.h.find_max("Density")
-        >>> sp = pf.h.sphere(c, (100.0, 'au'))
-        >>> L = sp.quantities["AngularMomentumVector"]()
-        >>> cutting = pf.h.cutting(L, c)
-        >>> frb = cutting.to_frb( (1.0, 'pc'), 1024)
-        >>> write_image(na.log10(frb["Density"]), 'density_1pc.png')
-        """
-        if iterable(width):
-            w, u = width
-            width = w/self.pf[u]
-        if not iterable(resolution):
-            resolution = (resolution, resolution)
-        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
-        bounds = (-width/2.0, width/2.0, -width/2.0, width/2.0)
-        frb = ObliqueFixedResolutionBuffer(self, bounds, resolution)
-        return frb
-
     def to_frb(self, width, resolution, height=None):
         r"""This function returns an ObliqueFixedResolutionBuffer generated
         from this object.
@@ -415,7 +405,7 @@
         >>> L = sp.quantities["AngularMomentumVector"]()
         >>> cutting = pf.h.cutting(L, c)
         >>> frb = cutting.to_frb( (1.0, 'pc'), 1024)
-        >>> write_image(na.log10(frb["Density"]), 'density_1pc.png')
+        >>> write_image(np.log10(frb["Density"]), 'density_1pc.png')
         """
         if iterable(width):
             w, u = width
@@ -439,7 +429,7 @@
             x = self._current_chunk.fcoords[:,0] - self.center[0]
             y = self._current_chunk.fcoords[:,1] - self.center[1]
             z = self._current_chunk.fcoords[:,2] - self.center[2]
-            tr = na.zeros(self.size, dtype='float64')
+            tr = np.zeros(self.size, dtype='float64')
             tr += x * self._x_vec[0]
             tr += y * self._x_vec[1]
             tr += z * self._x_vec[2]
@@ -448,7 +438,7 @@
             x = self._current_chunk.fcoords[:,0] - self.center[0]
             y = self._current_chunk.fcoords[:,1] - self.center[1]
             z = self._current_chunk.fcoords[:,2] - self.center[2]
-            tr = na.zeros(self.size, dtype='float64')
+            tr = np.zeros(self.size, dtype='float64')
             tr += x * self._y_vec[0]
             tr += y * self._y_vec[1]
             tr += z * self._y_vec[2]
@@ -457,7 +447,7 @@
             x = self._current_chunk.fcoords[:,0] - self.center[0]
             y = self._current_chunk.fcoords[:,1] - self.center[1]
             z = self._current_chunk.fcoords[:,2] - self.center[2]
-            tr = na.zeros(self.size, dtype='float64')
+            tr = np.zeros(self.size, dtype='float64')
             tr += x * self._norm_vec[0]
             tr += y * self._norm_vec[1]
             tr += z * self._norm_vec[2]
@@ -471,6 +461,83 @@
         else:
             raise KeyError(field)
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism of creating a plot window from this
+        object, which can then be moved around, zoomed, and on and on.  All
+        behavior of the plot window is relegated to that routine.
+        """
+        normal = self.normal
+        center = self.center
+        if fields is None:
+            if self.fields is None:
+                raise SyntaxError("The fields keyword argument must be set")
+        else:
+            self.fields = ensure_list(fields)
+        from yt.visualization.plot_window import \
+            GetOffAxisBoundsAndCenter, PWViewerMPL
+        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
+        (bounds, center_rot) = GetOffAxisBoundsAndCenter(normal, center, width, self.pf)
+        pw = PWViewerMPL(self, bounds, origin='center-window', periodic=False, oblique=True,
+                         frb_generator=ObliqueFixedResolutionBuffer, plot_type='OffAxisSlice')
+        pw.set_axes_unit(axes_unit)
+        return pw
+
+    def to_frb(self, width, resolution, height=None):
+        r"""This function returns an ObliqueFixedResolutionBuffer generated
+        from this object.
+
+        An ObliqueFixedResolutionBuffer is an object that accepts a
+        variable-resolution 2D object and transforms it into an NxM bitmap that
+        can be plotted, examined or processed.  This is a convenience function
+        to return an FRB directly from an existing 2D data object.  Unlike the
+        corresponding to_frb function for other YTSelectionContainer2D objects, this does
+        not accept a 'center' parameter as it is assumed to be centered at the
+        center of the cutting plane.
+
+        Parameters
+        ----------
+        width : width specifier
+            This can either be a floating point value, in the native domain
+            units of the simulation, or a tuple of the (value, unit) style.
+            This will be the width of the FRB.
+        height : height specifier, optional
+            This will be the height of the FRB, by default it is equal to width.
+        resolution : int or tuple of ints
+            The number of pixels on a side of the final FRB.
+
+        Returns
+        -------
+        frb : :class:`~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`
+            A fixed resolution buffer, which can be queried for fields.
+
+        Examples
+        --------
+
+        >>> v, c = pf.h.find_max("Density")
+        >>> sp = pf.h.sphere(c, (100.0, 'au'))
+        >>> L = sp.quantities["AngularMomentumVector"]()
+        >>> cutting = pf.h.cutting(L, c)
+        >>> frb = cutting.to_frb( (1.0, 'pc'), 1024)
+        >>> write_image(np.log10(frb["Density"]), 'density_1pc.png')
+        """
+        if iterable(width):
+            w, u = width
+            width = w/self.pf[u]
+        if height is None:
+            height = width
+        elif iterable(height):
+            h, u = height
+            height = h/self.pf[u]
+        if not iterable(resolution):
+            resolution = (resolution, resolution)
+        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
+        bounds = (-width/2.0, width/2.0, -height/2.0, height/2.0)
+        frb = ObliqueFixedResolutionBuffer(self, bounds, resolution)
+        return frb
+
 class YTFixedResCuttingPlaneBase(YTSelectionContainer2D):
     """
     YTFixedResCuttingPlaneBase is an oblique plane through the data,
@@ -498,34 +565,34 @@
         self.width = width
         self.dims = dims
         self.dds = self.width / self.dims
-        self.bounds = na.array([0.0,1.0,0.0,1.0])
+        self.bounds = np.array([0.0,1.0,0.0,1.0])
 
         self.set_field_parameter('center', center)
         # Let's set up our plane equation
         # ax + by + cz + d = 0
-        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
+        vecs = np.identity(3)
+        _t = np.cross(self._norm_vec, vecs).sum(axis=1)
         ax = _t.argmax()
-        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
-        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._norm_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
+        self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
+        self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
+        self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
 
         # Calculate coordinates of each pixel
         _co = self.dds * \
-              (na.mgrid[-self.dims/2 : self.dims/2,
+              (np.mgrid[-self.dims/2 : self.dims/2,
                         -self.dims/2 : self.dims/2] + 0.5)
-        self._coord = self.center + na.outer(_co[0,:,:], self._x_vec) + \
-                      na.outer(_co[1,:,:], self._y_vec)
-        self._pixelmask = na.ones(self.dims*self.dims, dtype='int8')
+        self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
+                      np.outer(_co[1,:,:], self._y_vec)
+        self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
 
         if node_name is not False:
             if node_name is True: self._deserialize()
@@ -540,11 +607,11 @@
         # within width/2 of the center.
         vertices = self.hierarchy.gridCorners
         # Shape = (8,3,n_grid)
-        D = na.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
-        valid_grids = na.where(na.logical_not(na.all(D<0,axis=0) |
-                                              na.all(D>0,axis=0) ))[0]
+        D = np.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
+        valid_grids = np.where(np.logical_not(np.all(D<0,axis=0) |
+                                              np.all(D>0,axis=0) ))[0]
         # Now restrict these grids to a rect. prism that bounds the slice
-        sliceCorners = na.array([ \
+        sliceCorners = np.array([ \
             self.center + 0.5*self.width * (+self._x_vec + self._y_vec),
             self.center + 0.5*self.width * (+self._x_vec - self._y_vec),
             self.center + 0.5*self.width * (-self._x_vec - self._y_vec),
@@ -552,12 +619,12 @@
         sliceLeftEdge = sliceCorners.min(axis=0)
         sliceRightEdge = sliceCorners.max(axis=0)
         # Check for bounding box and grid overlap
-        leftOverlap = na.less(self.hierarchy.gridLeftEdge[valid_grids],
+        leftOverlap = np.less(self.hierarchy.gridLeftEdge[valid_grids],
                               sliceRightEdge).all(axis=1)
-        rightOverlap = na.greater(self.hierarchy.gridRightEdge[valid_grids],
+        rightOverlap = np.greater(self.hierarchy.gridRightEdge[valid_grids],
                                   sliceLeftEdge).all(axis=1)
         self._grids = self.hierarchy.grids[valid_grids[
-            na.where(leftOverlap & rightOverlap)]]
+            np.where(leftOverlap & rightOverlap)]]
         self._grids = self._grids[::-1]
 
     def _generate_coords(self):
@@ -573,7 +640,7 @@
             pointI = self._get_point_indices(grid)
             if len(pointI) == 0: return
             vc = self._calc_vertex_centered_data(grid, field)
-            bds = na.array(zip(grid.LeftEdge,
+            bds = np.array(zip(grid.LeftEdge,
                                grid.RightEdge)).ravel()
             interp = TrilinearFieldInterpolator(vc, bds, ['x', 'y', 'z'])
             self[field][pointI] = interp( \
@@ -599,27 +666,27 @@
         self.width = width
         self.dds = self.width / self.dims
         self.set_field_parameter('center', center)
-        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
+        vecs = np.identity(3)
+        _t = np.cross(self._norm_vec, vecs).sum(axis=1)
         ax = _t.argmax()
-        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
-        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
+        self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
+        self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
+        self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
+        self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
         # Calculate coordinates of each pixel
         _co = self.dds * \
-              (na.mgrid[-self.dims/2 : self.dims/2,
+              (np.mgrid[-self.dims/2 : self.dims/2,
                         -self.dims/2 : self.dims/2] + 0.5)
 
-        self._coord = self.center + na.outer(_co[0,:,:], self._x_vec) + \
-                      na.outer(_co[1,:,:], self._y_vec)
-        self._pixelmask = na.ones(self.dims*self.dims, dtype='int8')
+        self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
+                      np.outer(_co[1,:,:], self._y_vec)
+        self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
 
     def get_data(self, fields):
         """
@@ -636,15 +703,12 @@
             if field not in self.hierarchy.field_list:
                 if self._generate_field(field):
                     continue # A "True" return means we did it
-            self[field] = na.zeros(_size, dtype='float64')
+            self[field] = np.zeros(_size, dtype='float64')
             for grid in self._get_grids():
                 self._get_data_from_grid(grid, field)
             self[field] = self.comm.mpi_allreduce(\
                 self[field], op='sum').reshape([self.dims]*2).transpose()
 
-    def interpolate_discretize(self, *args, **kwargs):
-        pass
-
     def _calc_vertex_centered_data(self, grid, field):
         #return grid.retrieve_ghost_zones(1, field, smoothed=False)
         return grid.get_vertex_centered_data(field)
@@ -677,34 +741,34 @@
         within the cylinder will be selected.
         """
         YTSelectionContainer3D.__init__(self, center, fields, pf, **kwargs)
-        self._norm_vec = na.array(normal)/na.sqrt(na.dot(normal,normal))
-        self.set_field_parameter("height_vector", self._norm_vec)
+        self._norm_vec = np.array(normal)/np.sqrt(np.dot(normal,normal))
+        self.set_field_parameter("normal", self._norm_vec)
         self._height = fix_length(height, self.pf)
         self._radius = fix_length(radius, self.pf)
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
 
     def _get_list_of_grids(self):
-        H = na.sum(self._norm_vec.reshape((1,3,1)) * self.pf.h.grid_corners,
+        H = np.sum(self._norm_vec.reshape((1,3,1)) * self.pf.h.grid_corners,
                    axis=1) + self._d
-        D = na.sqrt(na.sum((self.pf.h.grid_corners -
+        D = np.sqrt(np.sum((self.pf.h.grid_corners -
                            self.center.reshape((1,3,1)))**2.0,axis=1))
-        R = na.sqrt(D**2.0-H**2.0)
+        R = np.sqrt(D**2.0-H**2.0)
         self._grids = self.hierarchy.grids[
-            ( (na.any(na.abs(H)<self._height,axis=0))
-            & (na.any(R<self._radius,axis=0)
-            & (na.logical_not((na.all(H>0,axis=0) | (na.all(H<0, axis=0)))) )
+            ( (np.any(np.abs(H)<self._height,axis=0))
+            & (np.any(R<self._radius,axis=0)
+            & (np.logical_not((np.all(H>0,axis=0) | (np.all(H<0, axis=0)))) )
             ) ) ]
         self._grids = self.hierarchy.grids
 
     def _is_fully_enclosed(self, grid):
         corners = grid._corners.reshape((8,3,1))
-        H = na.sum(self._norm_vec.reshape((1,3,1)) * corners,
+        H = np.sum(self._norm_vec.reshape((1,3,1)) * corners,
                    axis=1) + self._d
-        D = na.sqrt(na.sum((corners -
+        D = np.sqrt(np.sum((corners -
                            self.center.reshape((1,3,1)))**2.0,axis=1))
-        R = na.sqrt(D**2.0-H**2.0)
-        return (na.all(na.abs(H) < self._height, axis=0) \
-            and na.all(R < self._radius, axis=0))
+        R = np.sqrt(D**2.0-H**2.0)
+        return (np.all(np.abs(H) < self._height, axis=0) \
+            and np.all(R < self._radius, axis=0))
 
     def _get_cut_mask(self, grid):
         if self._is_fully_enclosed(grid):
@@ -714,13 +778,13 @@
               + grid['y'] * self._norm_vec[1] \
               + grid['z'] * self._norm_vec[2] \
               + self._d
-            d = na.sqrt(
+            d = np.sqrt(
                 (grid['x'] - self.center[0])**2.0
               + (grid['y'] - self.center[1])**2.0
               + (grid['z'] - self.center[2])**2.0
                 )
-            r = na.sqrt(d**2.0-h**2.0)
-            cm = ( (na.abs(h) <= self._height)
+            r = np.sqrt(d**2.0-h**2.0)
+            cm = ( (np.abs(h) <= self._height)
                  & (r <= self._radius))
         return cm
 
@@ -738,8 +802,8 @@
         describe the box.  No checks are done to ensure that the box satisfies
         a right-hand rule, but if it doesn't, behavior is undefined.
         """
-        self.origin = na.array(origin)
-        self.box_vectors = na.array(box_vectors, dtype='float64')
+        self.origin = np.array(origin)
+        self.box_vectors = np.array(box_vectors, dtype='float64')
         self.box_lengths = (self.box_vectors**2.0).sum(axis=1)**0.5
         center = origin + 0.5*self.box_vectors.sum(axis=0)
         YTSelectionContainer3D.__init__(self, center, fields, pf, **kwargs)
@@ -749,11 +813,11 @@
         xv = self.box_vectors[0,:]
         yv = self.box_vectors[1,:]
         zv = self.box_vectors[2,:]
-        self._x_vec = xv / na.sqrt(na.dot(xv, xv))
-        self._y_vec = yv / na.sqrt(na.dot(yv, yv))
-        self._z_vec = zv / na.sqrt(na.dot(zv, zv))
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._z_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._x_vec = xv / np.sqrt(np.dot(xv, xv))
+        self._y_vec = yv / np.sqrt(np.dot(yv, yv))
+        self._z_vec = zv / np.sqrt(np.dot(zv, zv))
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._z_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
@@ -771,7 +835,7 @@
                                       grid.RightEdge, grid.dds,
                                       grid.child_mask, 1)
             if v: grids.append(grid)
-        self._grids = na.empty(len(grids), dtype='object')
+        self._grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self._grids[gi] = g
 
 
@@ -784,7 +848,7 @@
     def _get_cut_mask(self, grid):
         if self._is_fully_enclosed(grid):
             return True
-        pm = na.zeros(grid.ActiveDimensions, dtype='int32')
+        pm = np.zeros(grid.ActiveDimensions, dtype='int32')
         grid_points_in_volume(self.box_lengths, self.origin,
                               self._rot_mat, grid.LeftEdge,
                               grid.RightEdge, grid.dds, pm, 0)
@@ -844,13 +908,13 @@
         return True
 
     def _get_cut_mask(self, grid):
-        return na.ones(grid.ActiveDimensions, dtype='bool')
+        return np.ones(grid.ActiveDimensions, dtype='bool')
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.ones(grid.ActiveDimensions, dtype='bool')
+        k = np.ones(grid.ActiveDimensions, dtype='bool')
         if use_child_mask:
             k[grid.child_indices] = False
-        pointI = na.where(k == True)
+        pointI = np.where(k == True)
         return pointI
 
 class YTGridCollectionMaxLevelBase(YTSelectionContainer3D):
@@ -876,13 +940,13 @@
         return True
 
     def _get_cut_mask(self, grid):
-        return na.ones(grid.ActiveDimensions, dtype='bool')
+        return np.ones(grid.ActiveDimensions, dtype='bool')
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.ones(grid.ActiveDimensions, dtype='bool')
+        k = np.ones(grid.ActiveDimensions, dtype='bool')
         if use_child_mask and grid.Level < self.max_level:
             k[grid.child_indices] = False
-        pointI = na.where(k == True)
+        pointI = np.where(k == True)
         return pointI
 
 class YTSphereBase(YTSelectionContainer3D):
@@ -929,7 +993,7 @@
         can define an ellipsoid of any proportion.  Only cells whose centers are
         within the ellipsoid will be selected.
         """
-        AMR3DData.__init__(self, na.array(center), fields, pf, **kwargs)
+        YTSelectionContainer3D.__init__(self, np.array(center), fields, pf, **kwargs)
         # make sure the smallest side is not smaller than dx
         if C < self.hierarchy.get_smallest_dx():
             raise YTSphereTooSmall(pf, C, self.hierarchy.get_smallest_dx())
@@ -940,12 +1004,12 @@
         self._tilt = tilt
         
         # find the t1 angle needed to rotate about z axis to align e0 to x
-        t1 = na.arctan(e0[1] / e0[0])
+        t1 = np.arctan(e0[1] / e0[0])
         # rotate e0 by -t1
         RZ = get_rotation_matrix(t1, (0,0,1)).transpose()
         r1 = (e0 * RZ).sum(axis = 1)
         # find the t2 angle needed to rotate about y axis to align e0 to x
-        t2 = na.arctan(-r1[2] / r1[0])
+        t2 = np.arctan(-r1[2] / r1[0])
         """
         calculate the original e1
         given the tilt about the x axis when e0 was aligned 
@@ -957,7 +1021,7 @@
         e1 = ((0, 1, 0) * RX).sum(axis = 1)
         e1 = (e1 * RY).sum(axis = 1)
         e1 = (e1 * RZ).sum(axis = 1)
-        e2 = na.cross(e0, e1)
+        e2 = np.cross(e0, e1)
 
         self._e1 = e1
         self._e2 = e2
@@ -987,7 +1051,7 @@
                                   x.LeftEdge[0], \
                                   x.LeftEdge[1], \
                                   x.LeftEdge[2]))
-        self._grids = na.array(grids, dtype = 'object')
+        self._grids = np.array(grids, dtype = 'object')
 
     def _is_fully_enclosed(self, grid):
         """
@@ -997,18 +1061,18 @@
         vr = (grid._corners - self.center)
         # 3 possible cases of locations taking periodic BC into account
         # just listing the components, find smallest later
-        dotarr=na.array([vr, vr + self.DW, vr - self.DW])
+        dotarr=np.array([vr, vr + self.DW, vr - self.DW])
         # these vrdote# finds the product of vr components with e#
         # square the results
         # find the smallest
         # sums it
-        vrdote0_2 = (na.multiply(dotarr, self._e0)**2).min(axis \
+        vrdote0_2 = (np.multiply(dotarr, self._e0)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        vrdote1_2 = (na.multiply(dotarr, self._e1)**2).min(axis \
+        vrdote1_2 = (np.multiply(dotarr, self._e1)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        vrdote2_2 = (na.multiply(dotarr, self._e2)**2).min(axis \
+        vrdote2_2 = (np.multiply(dotarr, self._e2)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        return na.all(vrdote0_2 / self._A**2 + \
+        return np.all(vrdote0_2 / self._A**2 + \
                       vrdote1_2 / self._B**2 + \
                       vrdote2_2 / self._C**2 <=1.0)
 
@@ -1023,21 +1087,21 @@
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)) \
            and grid.id in self._cut_masks:
             return self._cut_masks[grid.id]
-        Inside = na.zeros(grid["x"].shape, dtype = 'float64')
+        Inside = np.zeros(grid["x"].shape, dtype = 'float64')
         dim = grid["x"].shape
         # need this to take into account non-cube root grid tiles
-        dot_evec = na.zeros([3, dim[0], dim[1], dim[2]])
+        dot_evec = np.zeros([3, dim[0], dim[1], dim[2]])
         for i, ax in enumerate('xyz'):
             # distance to center
             ar  = grid[ax]-self.center[i]
             # cases to take into account periodic BC
-            case = na.array([ar, ar + self.DW[i], ar - self.DW[i]])
+            case = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
             # find which of the 3 cases is smallest in magnitude
-            index = na.abs(case).argmin(axis = 0)
+            index = np.abs(case).argmin(axis = 0)
             # restrict distance to only the smallest cases
-            vec = na.choose(index, case)
+            vec = np.choose(index, case)
             # sum up to get the dot product with e_vectors
-            dot_evec += na.array([vec * self._e0[i], \
+            dot_evec += np.array([vec * self._e0[i], \
                                   vec * self._e1[i], \
                                   vec * self._e2[i]])
         # Calculate the eqn of ellipsoid, if it is inside

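To make the periodic handling in the ellipsoid's _get_cut_mask concrete: it is the minimum-image convention, where each displacement from the center is compared against its three periodic images ar, ar + DW, and ar - DW, keeping whichever has the smallest magnitude. A standalone sketch with a toy domain width:

import numpy as np

DW = 1.0                                 # toy domain width
ar = np.array([-0.9, 0.1, 0.8])          # raw displacements from the center
case = np.array([ar, ar + DW, ar - DW])  # the three periodic images
index = np.abs(case).argmin(axis=0)      # which image is closest
vec = np.choose(index, case)             # -> [ 0.1,  0.1, -0.2]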

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -226,8 +226,6 @@
         if getattr(self, "field_dependencies", None) is None:
             self.field_dependencies = {}
 
-        
-
 def _reconstruct_pf(*args, **kwargs):
     pfs = ParameterFileStore()
     pf = pfs.get_pf_hash(*args)


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/data_objects/tests/test_covering_grid.py
--- /dev/null
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -0,0 +1,28 @@
+from yt.testing import *
+from yt.data_objects.profiles import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_covering_grid():
+    # We decompose in different ways
+    return
+    for level in [0, 1, 2]:
+        for nprocs in [1, 2, 4, 8]:
+            pf = fake_random_pf(16, nprocs = nprocs)
+            dn = pf.refine_by**level 
+            cg = pf.h.covering_grid(level, [0.0, 0.0, 0.0],
+                    dn * pf.domain_dimensions)
+            yield assert_equal, cg["Ones"].max(), 1.0
+            yield assert_equal, cg["Ones"].min(), 1.0
+            yield assert_equal, cg["CellVolume"].sum(), pf.domain_width.prod()
+            for g in pf.h.grids:
+                di = g.get_global_startindex()
+                dd = g.ActiveDimensions
+                for i in range(dn):
+                    f = cg["Density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
+                                      dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
+                                      dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
+                    yield assert_equal, f, g["Density"]

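To spell out the strided slicing in test_covering_grid: for each sub-cell offset i, it selects the covering-grid samples that overlay a given source grid, so at refinement factor dn every source cell value should recur at each of the dn offsets. A 1D sketch of the same indexing with made-up values:

import numpy as np

dn, di, dd = 2, 3, 4                    # refine factor, grid start index, grid size
grid = np.arange(di, di + dd, dtype='float64')  # stand-in for g["Density"]
cg = np.repeat(np.arange(8.0), dn)      # stand-in level-1 covering grid
for i in range(dn):
    f = cg[dn*di + i : dn*(di + dd) + i : dn]
    assert (f == grid).all()            # every offset recovers the grid values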

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/data_objects/tests/test_profiles.py
--- /dev/null
+++ b/yt/data_objects/tests/test_profiles.py
@@ -0,0 +1,73 @@
+from yt.testing import *
+from yt.data_objects.profiles import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+
+_fields = ("Density", "Temperature", "Dinosaurs", "Tribbles")
+
+def test_profiles():
+    pf = fake_random_pf(64, nprocs = 8, fields = _fields)
+    nv = pf.domain_dimensions.prod()
+    dd = pf.h.all_data()
+    (rmi, rma), (tmi, tma), (dmi, dma) = dd.quantities["Extrema"](
+        ["Density", "Temperature", "Dinosaurs"])
+    rt, tt, dt = dd.quantities["TotalQuantity"](
+        ["Density", "Temperature", "Dinosaurs"])
+    # First we look at the 
+    for nb in [8, 16, 32, 64]:
+        # We log all the fields or don't log 'em all.  No need to do them
+        # individually.
+        for lf in [True, False]: 
+            # We have the min and the max, but to avoid cutting them off
+            # since we aren't doing end-collect, we cut a bit off the edges
+            for ec, e1, e2 in [(False, 0.9, 1.1), (True, 1.0, 1.0)]:
+                p1d = BinnedProfile1D(dd, 
+                    nb, "Density", rmi*e1, rma*e2, lf,
+                    end_collect=ec)
+                p1d.add_fields(["Ones", "Temperature"], weight=None)
+                yield assert_equal, p1d["Ones"].sum(), nv
+                yield assert_rel_equal, tt, p1d["Temperature"].sum(), 7
+
+                p2d = BinnedProfile2D(dd, 
+                    nb, "Density", rmi*e1, rma*e2, lf,
+                    nb, "Temperature", tmi*e1, tma*e2, lf,
+                    end_collect=ec)
+                p2d.add_fields(["Ones", "Temperature"], weight=None)
+                yield assert_equal, p2d["Ones"].sum(), nv
+                yield assert_rel_equal, tt, p2d["Temperature"].sum(), 7
+
+                p3d = BinnedProfile3D(dd, 
+                    nb, "Density", rmi*e1, rma*e2, lf,
+                    nb, "Temperature", tmi*e1, tma*e2, lf,
+                    nb, "Dinosaurs", dmi*e1, dma*e2, lf,
+                    end_collect=ec)
+                p3d.add_fields(["Ones", "Temperature"], weight=None)
+                yield assert_equal, p3d["Ones"].sum(), nv
+                yield assert_rel_equal, tt, p3d["Temperature"].sum(), 7
+
+        p1d = BinnedProfile1D(dd, nb, "x", 0.0, 1.0, log_space=False)
+        p1d.add_fields("Ones", weight=None)
+        av = nv / nb
+        yield assert_equal, p1d["Ones"][:-1], np.ones(nb)*av
+        # We re-bin ones with a weight now
+        p1d.add_fields(["Ones"], weight="Temperature")
+        yield assert_equal, p1d["Ones"][:-1], np.ones(nb)
+
+        p2d = BinnedProfile2D(dd, nb, "x", 0.0, 1.0, False,
+                                  nb, "y", 0.0, 1.0, False)
+        p2d.add_fields("Ones", weight=None)
+        av = nv / nb**2
+        yield assert_equal, p2d["Ones"][:-1,:-1], np.ones((nb, nb))*av
+        # We re-bin ones with a weight now
+        p2d.add_fields(["Ones"], weight="Temperature")
+        yield assert_equal, p2d["Ones"][:-1,:-1], np.ones((nb, nb))
+
+        p3d = BinnedProfile3D(dd, nb, "x", 0.0, 1.0, False,
+                                  nb, "y", 0.0, 1.0, False,
+                                  nb, "z", 0.0, 1.0, False)
+        p3d.add_fields("Ones", weight=None)
+        av = nv / nb**3
+        yield assert_equal, p3d["Ones"][:-1,:-1,:-1], np.ones((nb, nb, nb))*av
+        # We re-bin ones with a weight now
+        p3d.add_fields(["Ones"], weight="Temperature")
+        yield assert_equal, p3d["Ones"][:-1,:-1,:-1], np.ones((nb,nb,nb))
+

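The av = nv / nb expectation in test_profiles follows from binning a uniform field: 64**3 evenly spaced cells divide exactly evenly among nb linear bins in x. A quick standalone check of that arithmetic (nb = 16 picked arbitrarily from the tested values):

import numpy as np

nv, nb = 64**3, 16
x = (np.arange(64) + 0.5) / 64.0         # cell-center x coordinates
counts, _ = np.histogram(np.tile(x, 64*64), bins=nb, range=(0.0, 1.0))
assert (counts == nv // nb).all()        # each bin holds exactly nv/nb cells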

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/data_objects/tests/test_projection.py
--- /dev/null
+++ b/yt/data_objects/tests/test_projection.py
@@ -0,0 +1,39 @@
+from yt.testing import *
+from yt.data_objects.profiles import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_projection():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        xn, yn, zn = pf.domain_dimensions
+        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
+        dd = pf.h.all_data()
+        rho_tot = dd.quantities["TotalQuantity"]("Density")[0]
+        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        uc = [np.unique(c) for c in coords]
+        # Some simple projection tests with single grids
+        for ax, an in enumerate("xyz"):
+            xax = x_dict[ax]
+            yax = y_dict[ax]
+            for wf in ["Density", None]:
+                proj = pf.h.proj(["Ones", "Density"], ax, weight_field = wf)
+                yield assert_equal, proj["Ones"].sum(), proj["Ones"].size
+                yield assert_equal, proj["Ones"].min(), 1.0
+                yield assert_equal, proj["Ones"].max(), 1.0
+                yield assert_equal, np.unique(proj["px"]), uc[xax]
+                yield assert_equal, np.unique(proj["py"]), uc[yax]
+                yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
+                yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+            # wf == None
+            yield assert_equal, wf, None
+            v1 = proj["Density"].sum()
+            v2 = (dd["Density"] * dd["d%s" % an]).sum()
+            yield assert_rel_equal, v1, v2, 10

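The closing check in test_projection asserts that an unweighted projection is a line integral: the sum of the projected Density over all pixels equals sum(rho * dl) over all cells. The same identity in miniature, on a toy cube:

import numpy as np

rho = np.random.random((4, 4, 4))        # toy density cube
dl = 1.0 / 4                             # uniform path length per cell
proj = (rho * dl).sum(axis=2)            # project along z
assert np.allclose(proj.sum(), (rho * dl).sum())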

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -26,13 +26,13 @@
 """
 
 import types
-import numpy as na
+import numpy as np
 import inspect
 import copy
 
 from yt.funcs import *
 
-from yt.utilities.lib import CICDeposit_3, obtain_rvec
+from yt.utilities.lib import CICDeposit_3, obtain_rvec, obtain_rv_vec
 from yt.utilities.cosmology import Cosmology
 from field_info_container import \
     add_field, \
@@ -54,70 +54,82 @@
      kboltz, \
      G, \
      rho_crit_now, \
-     speed_of_light_cgs
+     speed_of_light_cgs, \
+     km_per_cm
+
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi
      
 # Note that, despite my newfound efforts to comply with PEP-8,
 # I violate it here in order to keep the name/func_name relationship
 
 def _dx(field, data):
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
 add_field('dx', function=_dx, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _dy(field, data):
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
 add_field('dy', function=_dy, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _dz(field, data):
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
 add_field('dz', function=_dz,
           display_field=False, validators=[ValidateSpatial(0)])
 
 def _coordX(field, data):
     dim = data.ActiveDimensions[0]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[0])[:,None,None]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[0])[:,None,None]
             +0.5) * data['dx'] + data.LeftEdge[0]
 add_field('x', function=_coordX, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _coordY(field, data):
     dim = data.ActiveDimensions[1]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[1])[None,:,None]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[1])[None,:,None]
             +0.5) * data['dy'] + data.LeftEdge[1]
 add_field('y', function=_coordY, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _coordZ(field, data):
     dim = data.ActiveDimensions[2]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[2])[None,None,:]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[2])[None,None,:]
             +0.5) * data['dz'] + data.LeftEdge[2]
 add_field('z', function=_coordZ, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _GridLevel(field, data):
-    return na.ones(data.ActiveDimensions)*(data.Level)
+    return np.ones(data.ActiveDimensions)*(data.Level)
 add_field("GridLevel", function=_GridLevel,
           validators=[ValidateGridType(),
                       ValidateSpatial(0)])
 
 def _GridIndices(field, data):
-    return na.ones(data["Ones"].shape)*(data.id-data._id_offset)
+    return np.ones(data["Ones"].shape)*(data.id-data._id_offset)
 add_field("GridIndices", function=_GridIndices,
           validators=[ValidateGridType(),
                       ValidateSpatial(0)], take_log=False)
 
 def _OnesOverDx(field, data):
-    return na.ones(data["Ones"].shape,
+    return np.ones(data["Ones"].shape,
                    dtype=data["Density"].dtype)/data['dx']
 add_field("OnesOverDx", function=_OnesOverDx,
           display_field=False)
 
 def _Ones(field, data):
-    return na.ones(data.shape, dtype='float64')
+    return np.ones(data.shape, dtype='float64')
 add_field("Ones", function=_Ones,
           projection_conversion="unitary",
           display_field = False)
@@ -125,7 +137,7 @@
 
 def _SoundSpeed(field, data):
     if data.pf["EOSType"] == 1:
-        return na.ones(data["Density"].shape, dtype='float64') * \
+        return np.ones(data["Density"].shape, dtype='float64') * \
                 data.pf["EOSSoundSpeed"]
     return ( data.pf["Gamma"]*data["Pressure"] / \
              data["Density"] )**(1.0/2.0)
@@ -134,7 +146,7 @@
 
 def _RadialMachNumber(field, data):
     """M{|v|/t_sound}"""
-    return na.abs(data["RadialVelocity"]) / data["SoundSpeed"]
+    return np.abs(data["RadialVelocity"]) / data["SoundSpeed"]
 add_field("RadialMachNumber", function=_RadialMachNumber)
 
 def _MachNumber(field, data):
@@ -152,7 +164,7 @@
     t3 = data['dz'] / (
         data["SoundSpeed"] + \
         abs(data["z-velocity"]))
-    return na.minimum(na.minimum(t1,t2),t3)
+    return np.minimum(np.minimum(t1,t2),t3)
 def _convertCourantTimeStep(data):
     # SoundSpeed and z-velocity are in cm/s, dx is in code
     return data.convert("cm")
@@ -164,7 +176,7 @@
     """M{|v|}"""
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
+        bulk_velocity = np.zeros(3)
     return ( (data["particle_velocity_x"]-bulk_velocity[0])**2.0 + \
              (data["particle_velocity_y"]-bulk_velocity[1])**2.0 + \
              (data["particle_velocity_z"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
@@ -174,28 +186,17 @@
 
 def _VelocityMagnitude(field, data):
     """M{|v|}"""
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
-    return ( (data["x-velocity"]-bulk_velocity[0])**2.0 + \
-             (data["y-velocity"]-bulk_velocity[1])**2.0 + \
-             (data["z-velocity"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
+    velocities = obtain_rv_vec(data)
+    return np.sqrt(np.sum(velocities**2,axis=0))
 add_field("VelocityMagnitude", function=_VelocityMagnitude,
           take_log=False, units=r"\rm{cm}/\rm{s}")
 
 def _TangentialOverVelocityMagnitude(field, data):
-    return na.abs(data["TangentialVelocity"])/na.abs(data["VelocityMagnitude"])
+    return np.abs(data["TangentialVelocity"])/np.abs(data["VelocityMagnitude"])
 add_field("TangentialOverVelocityMagnitude",
           function=_TangentialOverVelocityMagnitude,
           take_log=False)
 
-def _TangentialVelocity(field, data):
-    return na.sqrt(data["VelocityMagnitude"]**2.0
-                 - data["RadialVelocity"]**2.0)
-add_field("TangentialVelocity", 
-          function=_TangentialVelocity,
-          take_log=False, units=r"\rm{cm}/\rm{s}")
-
 def _Pressure(field, data):
     """M{(Gamma-1.0)*rho*E}"""
     return (data.pf["Gamma"] - 1.0) * \
@@ -212,50 +213,178 @@
 add_field("Entropy", units=r"\rm{ergs}\ \rm{cm}^{3\gamma-3}",
           function=_Entropy)
 
+
+
+### spherical coordinates: r (radius)
+def _sph_r(field, data):
+    center = data.get_field_parameter("center")
+
+    coords = obtain_rvec(data).transpose()
+
+    return get_sph_r(coords, center)
+
+def _Convert_sph_r_CGS(data):
+    return data.convert("cm")
+
+add_field("sph_r", function=_sph_r,
+         validators=[ValidateParameter("center")],
+         convert_function = _Convert_sph_r_CGS, units=r"\rm{cm}")
+
+
+### spherical coordinates: theta (angle with respect to normal)
+def _sph_theta(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = obtain_rvec(data).transpose()
+
+    return get_sph_theta(coords, normal)
+
+add_field("sph_theta", function=_sph_theta,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
+
+### spherical coordinates: phi (angle in the plane perpendicular to the normal)
+def _sph_phi(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = obtain_rvec(data).transpose()
+
+    return get_sph_phi(coords, normal)
+
+add_field("sph_phi", function=_sph_phi,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
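The get_sph_* helpers come from yt.utilities.math_utils and are not part of
this diff; for an (N, 3) array of center-relative position vectors they
presumably reduce to the textbook conversions, roughly like this sketch (not
the library's actual source):

    import numpy as np

    def get_sph_r(coords):
        # spherical radius: |r|
        return np.sqrt((coords * coords).sum(axis=-1))

    def get_sph_theta(coords, normal):
        # polar angle between each position vector and the normal
        J = np.asarray(normal, dtype='float64')
        J = J / np.sqrt(np.dot(J, J))
        costheta = np.dot(coords, J) / get_sph_r(coords)
        return np.arccos(np.clip(costheta, -1.0, 1.0))

    def get_sph_phi(coords, normal):
        # azimuth in the plane perpendicular to the normal; the zero
        # point of phi is an arbitrary in-plane axis
        J = np.asarray(normal, dtype='float64')
        J = J / np.sqrt(np.dot(J, J))
        xprime = np.cross(J, [1.0, 0.0, 0.0])
        if np.dot(xprime, xprime) == 0.0:  # normal lay along x; use y
            xprime = np.cross(J, [0.0, 1.0, 0.0])
        xprime = xprime / np.sqrt(np.dot(xprime, xprime))
        yprime = np.cross(J, xprime)
        return np.arctan2(np.dot(coords, yprime), np.dot(coords, xprime))
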
+### cylindrical coordinates: R (radius in the cylinder's plane)
+def _cyl_R(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+
+    coords = obtain_rvec(data).transpose()
+
+    return get_cyl_r(coords, normal)
+
+def _Convert_cyl_R_CGS(data):
+    return data.convert("cm")
+
+add_field("cyl_R", function=_cyl_R,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")],
+         convert_function = _Convert_cyl_R_CGS, units=r"\rm{cm}")
+add_field("cyl_RCode", function=_cyl_R,
+          validators=[ValidateParameter("center"),ValidateParameter("normal")],
+          units=r"Radius (code)")
+
+
+### cylindrical coordinates: z (height above the cylinder's plane)
+def _cyl_z(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = obtain_rvec(data).transpose()
+
+    return get_cyl_z(coords, normal)
+
+def _Convert_cyl_z_CGS(data):
+    return data.convert("cm")
+
+add_field("cyl_z", function=_cyl_z,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")],
+         convert_function = _Convert_cyl_z_CGS, units=r"\rm{cm}")
+
+
+### cylindrical coordinates: theta (angle in the cylinder's plane)
+def _cyl_theta(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = obtain_rvec(data).transpose()
+
+    return get_cyl_theta(coords, normal)
+
+add_field("cyl_theta", function=_cyl_theta,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
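All of these fields validate on the "center" and "normal" field parameters,
so they only resolve on data objects that carry both; in yt-2.x style that
looks roughly like this (a sketch; the dataset name is made up):

    from yt.mods import load
    import numpy as np

    pf = load("galaxy0030/galaxy0030")   # hypothetical dataset
    sp = pf.h.sphere([0.5, 0.5, 0.5], 10.0 / pf["kpc"])
    # the sphere supplies "center" itself; "normal" we set by hand
    sp.set_field_parameter("normal", np.array([0.0, 0.0, 1.0]))
    print sp["cyl_R"].max()              # in cm, via the convert_function
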
+### The old field DiskAngle is the same as the spherical coordinates'
+### 'theta' angle. I'm keeping DiskAngle for backwards compatibility.
+def _DiskAngle(field, data):
+    return data['sph_theta']
+
+add_field("DiskAngle", function=_DiskAngle,
+          take_log=False,
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
+          display_field=False)
+
+
+### The old field Height is the same as the cylindrical coordinates' z
+### field. I'm keeping Height for backwards compatibility.
 def _Height(field, data):
-    # We take the dot product of the radius vector with the height-vector
-    center = data.get_field_parameter("center")
-    r_vec = na.array([data["x"] - center[0],
-                      data["y"] - center[1],
-                      data["z"] - center[2]])
-    h_vec = na.array(data.get_field_parameter("height_vector"))
-    h_vec = h_vec / na.sqrt(h_vec[0]**2.0+
-                            h_vec[1]**2.0+
-                            h_vec[2]**2.0)
-    height = r_vec[0,:] * h_vec[0] \
-           + r_vec[1,:] * h_vec[1] \
-           + r_vec[2,:] * h_vec[2]
-    return na.abs(height)
+    return data['cyl_z']
+
 def _convertHeight(data):
     return data.convert("cm")
 def _convertHeightAU(data):
     return data.convert("au")
 add_field("Height", function=_Height,
           convert_function=_convertHeight,
-          validators=[ValidateParameter("height_vector")],
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
           units=r"cm", display_field=False)
 add_field("HeightAU", function=_Height,
           convert_function=_convertHeightAU,
-          validators=[ValidateParameter("height_vector")],
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
           units=r"AU", display_field=False)
 
-def _DiskAngle(field, data):
-    # We make both r_vec and h_vec into unit vectors
-    center = data.get_field_parameter("center")
-    r_vec = na.array([data["x"] - center[0],
-                      data["y"] - center[1],
-                      data["z"] - center[2]])
-    r_vec = r_vec/na.sqrt((r_vec**2.0).sum(axis=0))
-    h_vec = na.array(data.get_field_parameter("height_vector"))
-    dp = r_vec[0,:] * h_vec[0] \
-       + r_vec[1,:] * h_vec[1] \
-       + r_vec[2,:] * h_vec[2]
-    return na.arccos(dp)
-add_field("DiskAngle", function=_DiskAngle,
-          take_log=False,
-          validators=[ValidateParameter("height_vector"),
-                      ValidateParameter("center")],
-          display_field=False)
+def _cyl_RadialVelocity(field, data):
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data).transpose()
+
+    theta = np.tile(data['cyl_theta'], (3, 1)).transpose()
+
+    return get_cyl_r_component(velocities, theta, normal)
+
+def _cyl_RadialVelocityABS(field, data):
+    return np.abs(_cyl_RadialVelocity(field, data))
+def _Convert_cyl_RadialVelocityKMS(data):
+    return km_per_cm
+add_field("cyl_RadialVelocity", function=_cyl_RadialVelocity,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityABS", function=_cyl_RadialVelocityABS,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityKMS", function=_cyl_RadialVelocity,
+          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityKMSABS", function=_cyl_RadialVelocityABS,
+          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+
+def _cyl_TangentialVelocity(field, data):
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data).transpose()
+    theta = np.tile(data['cyl_theta'], (3, 1)).transpose()
+
+    return get_cyl_theta_component(velocities, theta, normal)
+
+def _cyl_TangentialVelocityABS(field, data):
+    return np.abs(_cyl_TangentialVelocity(field, data))
+def _Convert_cyl_TangentialVelocityKMS(data):
+    return km_per_cm
+add_field("cyl_TangentialVelocity", function=_cyl_TangentialVelocity,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityABS", function=_cyl_TangentialVelocityABS,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityKMS", function=_cyl_TangentialVelocity,
+          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityKMSABS", function=_cyl_TangentialVelocityABS,
+          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
 
 def _DynamicalTime(field, data):
     """
@@ -263,7 +392,7 @@
     M{sqrt(3pi/(16*G*rho))} or M{sqrt(3pi/(16G))*rho^-(1/2)}
     Note that we return in our natural units already
     """
-    return (3.0*na.pi/(16*G*data["Density"]))**(1./2.)
+    return (3.0*np.pi/(16*G*data["Density"]))**(1./2.)
 add_field("DynamicalTime", function=_DynamicalTime,
            units=r"\rm{s}")
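
For scale, evaluating the expression at about one hydrogen atom per cm^3
(a quick CGS check, not part of the commit):

    import numpy as np

    G = 6.674e-8                  # cm^3 g^-1 s^-2
    rho = 1.67e-24                # g cm^-3, ~1 H atom per cm^3
    t_dyn = np.sqrt(3.0 * np.pi / (16.0 * G * rho))
    print "%.2e s (~%d Myr)" % (t_dyn, t_dyn / 3.156e13)
    # ~2.3e15 s, i.e. ~73 Myr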
 
@@ -366,7 +495,7 @@
     if data['dx'].size == 1:
         try:
             return data['dx']*data['dy']*data['dz']*\
-                na.ones(data.ActiveDimensions, dtype='float64')
+                np.ones(data.ActiveDimensions, dtype='float64')
         except AttributeError:
             return data['dx']*data['dy']*data['dz']
     return data["dx"]*data["dy"]*data["dz"]
@@ -384,7 +513,7 @@
           convert_function=_ConvertCellVolumeCGS)
 
 def _ChandraEmissivity(field, data):
-    logT0 = na.log10(data["Temperature"]) - 7
+    logT0 = np.log10(data["Temperature"]) - 7
     return ((data["NumberDensity"].astype('float64')**2.0) \
             *(10**(-0.0103*logT0**8 \
                    +0.0417*logT0**7 \
@@ -443,15 +572,15 @@
 
 def _AveragedDensity(field, data):
     nx, ny, nz = data["Density"].shape
-    new_field = na.zeros((nx-2,ny-2,nz-2), dtype='float64')
-    weight_field = na.zeros((nx-2,ny-2,nz-2), dtype='float64')
-    i_i, j_i, k_i = na.mgrid[0:3,0:3,0:3]
+    new_field = np.zeros((nx-2,ny-2,nz-2), dtype='float64')
+    weight_field = np.zeros((nx-2,ny-2,nz-2), dtype='float64')
+    i_i, j_i, k_i = np.mgrid[0:3,0:3,0:3]
     for i,j,k in zip(i_i.ravel(),j_i.ravel(),k_i.ravel()):
         sl = [slice(i,nx-(2-i)),slice(j,ny-(2-j)),slice(k,nz-(2-k))]
         new_field += data["Density"][sl] * data["CellMass"][sl]
         weight_field += data["CellMass"][sl]
     # Now some fancy footwork
-    new_field2 = na.zeros((nx,ny,nz))
+    new_field2 = np.zeros((nx,ny,nz))
     new_field2[1:-1,1:-1,1:-1] = new_field/weight_field
     return new_field2
 add_field("AveragedDensity",
@@ -479,7 +608,7 @@
         ds = div_fac * data['dz'].flat[0]
         f += data["z-velocity"][1:-1,1:-1,sl_right]/ds
         f -= data["z-velocity"][1:-1,1:-1,sl_left ]/ds
-    new_field = na.zeros(data["x-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["x-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = f
     return new_field
 def _convertDivV(data):
@@ -491,12 +620,12 @@
           convert_function=_convertDivV)
 
 def _AbsDivV(field, data):
-    return na.abs(data['DivV'])
+    return np.abs(data['DivV'])
 add_field("AbsDivV", function=_AbsDivV,
           units=r"\rm{s}^{-1}")
 
 def _Contours(field, data):
-    return -na.ones_like(data["Ones"])
+    return -np.ones_like(data["Ones"])
 add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
           display_field=False, function=_Contours)
 add_field("tempContours", function=_Contours,
@@ -504,13 +633,7 @@
           take_log=False, display_field=False)
 
 def obtain_velocities(data):
-    if data.has_field_parameter("bulk_velocity"):
-        bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
-    xv = data["x-velocity"] - bv[0]
-    yv = data["y-velocity"] - bv[1]
-    zv = data["z-velocity"] - bv[2]
-    return xv, yv, zv
+    return obtain_rv_vec(data)
 
 def _convertSpecificAngularMomentum(data):
     return data.convert("cm")
@@ -558,24 +681,24 @@
     """
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     xv = data["particle_velocity_x"] - bv[0]
     yv = data["particle_velocity_y"] - bv[1]
     zv = data["particle_velocity_z"] - bv[2]
     center = data.get_field_parameter('center')
-    coords = na.array([data['particle_position_x'],
+    coords = np.array([data['particle_position_x'],
                        data['particle_position_y'],
                        data['particle_position_z']], dtype='float64')
     new_shape = tuple([3] + [1]*(len(coords.shape)-1))
-    r_vec = coords - na.reshape(center,new_shape)
-    v_vec = na.array([xv,yv,zv], dtype='float64')
-    return na.cross(r_vec, v_vec, axis=0)
+    r_vec = coords - np.reshape(center,new_shape)
+    v_vec = np.array([xv,yv,zv], dtype='float64')
+    return np.cross(r_vec, v_vec, axis=0)
 #add_field("ParticleSpecificAngularMomentum",
 #          function=_ParticleSpecificAngularMomentum, particle_type=True,
 #          convert_function=_convertSpecificAngularMomentum, vector_field=True,
 #          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter('center')])
 def _convertSpecificAngularMomentumKMSMPC(data):
-    return data.convert("mpc")/1e5
+    return km_per_cm*data.convert("mpc")
 #add_field("ParticleSpecificAngularMomentumKMSMPC",
 #          function=_ParticleSpecificAngularMomentum, particle_type=True,
 #          convert_function=_convertSpecificAngularMomentumKMSMPC, vector_field=True,
@@ -584,7 +707,7 @@
 def _ParticleSpecificAngularMomentumX(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     y = data["particle_position_y"] - center[1]
     z = data["particle_position_z"] - center[2]
@@ -594,7 +717,7 @@
 def _ParticleSpecificAngularMomentumY(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     x = data["particle_position_x"] - center[0]
     z = data["particle_position_z"] - center[2]
@@ -604,7 +727,7 @@
 def _ParticleSpecificAngularMomentumZ(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     x = data["particle_position_x"] - center[0]
     y = data["particle_position_y"] - center[1]
@@ -652,20 +775,20 @@
 def _ParticleRadius(field, data):
     center = data.get_field_parameter("center")
     DW = data.pf.domain_right_edge - data.pf.domain_left_edge
-    radius = na.zeros(data["particle_position_x"].shape, dtype='float64')
+    radius = np.zeros(data["particle_position_x"].shape, dtype='float64')
     for i, ax in enumerate('xyz'):
-        r = na.abs(data["particle_position_%s" % ax] - center[i])
-        radius += na.minimum(r, na.abs(DW[i]-r))**2.0
-    na.sqrt(radius, radius)
+        r = np.abs(data["particle_position_%s" % ax] - center[i])
+        radius += np.minimum(r, np.abs(DW[i]-r))**2.0
+    np.sqrt(radius, radius)
     return radius
 def _Radius(field, data):
     center = data.get_field_parameter("center")
     DW = data.pf.domain_right_edge - data.pf.domain_left_edge
-    radius = na.zeros(data["x"].shape, dtype='float64')
+    radius = np.zeros(data["x"].shape, dtype='float64')
     for i, ax in enumerate('xyz'):
-        r = na.abs(data[ax] - center[i])
-        radius += na.minimum(r, na.abs(DW[i]-r))**2.0
-    na.sqrt(radius, radius)
+        r = np.abs(data[ax] - center[i])
+        radius += np.minimum(r, np.abs(DW[i]-r))**2.0
+    np.sqrt(radius, radius)
     return radius
 def _ConvertRadiusCGS(data):
     return data.convert("cm")
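
Note the np.minimum(r, np.abs(DW - r)) in both radius fields: distances are
computed with the minimum-image convention, so a point near one edge of a
periodic domain is close to a center near the opposite edge. In one
dimension (illustrative only):

    DW = 1.0                      # domain width
    x, c = 0.95, 0.05             # point and center near opposite edges
    r = abs(x - c)                # naive distance: 0.9
    print min(r, abs(DW - r))     # wrapped distance: 0.1
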
@@ -747,43 +870,42 @@
           display_name = "Radius (code)")
 
 def _RadialVelocity(field, data):
-    center = data.get_field_parameter("center")
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
-    new_field = ( (data['x']-center[0])*(data["x-velocity"]-bulk_velocity[0])
-                + (data['y']-center[1])*(data["y-velocity"]-bulk_velocity[1])
-                + (data['z']-center[2])*(data["z-velocity"]-bulk_velocity[2])
-                )/data["RadiusCode"]
-    if na.any(na.isnan(new_field)): # to fix center = point
-        new_field[na.isnan(new_field)] = 0.0
-    return new_field
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data).transpose()
+    theta = np.tile(data['sph_theta'], (3, 1)).transpose()
+    phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
+
+    return get_sph_r_component(velocities, theta, phi, normal)
+
 def _RadialVelocityABS(field, data):
-    return na.abs(_RadialVelocity(field, data))
+    return np.abs(_RadialVelocity(field, data))
 def _ConvertRadialVelocityKMS(data):
-    return 1e-5
+    return km_per_cm
 add_field("RadialVelocity", function=_RadialVelocity,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          units=r"\rm{cm}/\rm{s}")
 add_field("RadialVelocityABS", function=_RadialVelocityABS,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          units=r"\rm{cm}/\rm{s}")
 add_field("RadialVelocityKMS", function=_RadialVelocity,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
 add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
+
+def _TangentialVelocity(field, data):
+    return np.sqrt(data["VelocityMagnitude"]**2.0
+                 - data["RadialVelocity"]**2.0)
+add_field("TangentialVelocity", 
+          function=_TangentialVelocity,
+          take_log=False, units=r"\rm{cm}/\rm{s}")
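
TangentialVelocity is recovered from the Pythagorean split
|v|^2 = v_r^2 + v_t^2 rather than by projecting out the radial part
explicitly; a quick check that the two agree (illustrative only):

    import numpy as np

    v = np.array([1.0, 2.0, 2.0])         # some velocity
    rhat = np.array([1.0, 0.0, 0.0])      # unit radial direction
    v_r = np.dot(v, rhat)
    v_t = np.sqrt(np.dot(v, v) - v_r ** 2)
    assert np.allclose(v_t, np.linalg.norm(v - v_r * rhat))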
 
 def _CuttingPlaneVelocityX(field, data):
     x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
                            for ax in 'xyz']
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
-    v_vec = na.array([data["%s-velocity" % ax] for ax in 'xyz']) \
-                - bulk_velocity[...,na.newaxis]
-    return na.dot(x_vec, v_vec)
+        bulk_velocity = np.zeros(3)
+    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']) \
+                - bulk_velocity[...,np.newaxis]
+    return np.dot(x_vec, v_vec)
 add_field("CuttingPlaneVelocityX", 
           function=_CuttingPlaneVelocityX,
           validators=[ValidateParameter("cp_%s_vec" % ax)
@@ -793,15 +915,34 @@
                            for ax in 'xyz']
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
-    v_vec = na.array([data["%s-velocity" % ax] for ax in 'xyz']) \
-                - bulk_velocity[...,na.newaxis]
-    return na.dot(y_vec, v_vec)
+        bulk_velocity = np.zeros(3)
+    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']) \
+                - bulk_velocity[...,np.newaxis]
+    return np.dot(y_vec, v_vec)
 add_field("CuttingPlaneVelocityY", 
           function=_CuttingPlaneVelocityY,
           validators=[ValidateParameter("cp_%s_vec" % ax)
                       for ax in 'xyz'], units=r"\rm{km}/\rm{s}")
 
+def _CuttingPlaneBx(field, data):
+    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
+                           for ax in 'xyz']
+    b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
+    return np.dot(x_vec, b_vec)
+add_field("CuttingPlaneBx", 
+          function=_CuttingPlaneBx,
+          validators=[ValidateParameter("cp_%s_vec" % ax)
+                      for ax in 'xyz'], units=r"\rm{Gauss}")
+def _CuttingPlaneBy(field, data):
+    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
+                           for ax in 'xyz']
+    b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
+    return np.dot(y_vec, b_vec)
+add_field("CuttingPlaneBy", 
+          function=_CuttingPlaneBy,
+          validators=[ValidateParameter("cp_%s_vec" % ax)
+                      for ax in 'xyz'], units=r"\rm{Gauss}")
+
 def _MeanMolecularWeight(field,data):
     return (data["Density"] / (mh *data["NumberDensity"]))
 add_field("MeanMolecularWeight",function=_MeanMolecularWeight,units=r"")
@@ -819,16 +960,16 @@
 def _convertDensity(data):
     return data.convert("Density")
 def _pdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
-    CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                 data["particle_position_y"].astype(na.float64),
-                 data["particle_position_z"].astype(na.float64),
-                 data["particle_mass"].astype(na.float32),
-                 na.int64(data.NumberOfParticles),
-                 blank, na.array(data.LeftEdge).astype(na.float64),
-                 na.array(data.ActiveDimensions).astype(na.int32),
-                 na.float64(data['dx']))
+    CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                 data["particle_position_y"].astype(np.float64),
+                 data["particle_position_z"].astype(np.float64),
+                 data["particle_mass"].astype(np.float32),
+                 np.int64(data.NumberOfParticles),
+                 blank, np.array(data.LeftEdge).astype(np.float64),
+                 np.array(data.ActiveDimensions).astype(np.int32),
+                 np.float64(data['dx']))
     return blank
 add_field("particle_density", function=_pdensity,
           validators=[ValidateGridType()], convert_function=_convertDensity,
@@ -839,12 +980,78 @@
     units of Gauss. If you use MKS, make sure to write your own
     MagneticEnergy field to deal with non-unitary \mu_0.
     """
-    return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2)/2.
+    return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2)/(8*np.pi)
 add_field("MagneticEnergy",function=_MagneticEnergy,
-          units=r"",
-          validators = [ValidateDataField("Bx"),
-                        ValidateDataField("By"),
-                        ValidateDataField("Bz")])
+          units=r"\rm{ergs}\/\rm{cm}^{-3}",
+          display_name=r"\rm{Magnetic}\/\rm{Energy}")
+
+def _BMagnitude(field,data):
+    """This assumes that your front end has provided Bx, By, Bz in
+    units of Gauss. If you use MKS, make sure to write your own
+    BMagnitude field to deal with non-unitary \mu_0.
+    """
+    return np.sqrt((data["Bx"]**2 + data["By"]**2 + data["Bz"]**2))
+add_field("BMagnitude",
+          function=_BMagnitude,
+          display_name=r"|B|", units=r"\rm{Gauss}")
+
+def _PlasmaBeta(field,data):
+    """This assumes that your front end has provided Bx, By, Bz in
+    units of Gauss. If you use MKS, make sure to write your own
+    PlasmaBeta field to deal with non-unitary \mu_0.
+    """
+    return data['Pressure']/data['MagneticEnergy']
+add_field("PlasmaBeta",
+          function=_PlasmaBeta,
+          display_name=r"\rm{Plasma}\/\beta", units="")
+
+def _MagneticPressure(field,data):
+    return data['MagneticEnergy']
+add_field("MagneticPressure",
+          function=_MagneticPressure,
+          display_name=r"\rm{Magnetic}\/\rm{Pressure}",
+          units=r"\rm{ergs}\/\rm{cm}^{-3}")
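
With Bx/By/Bz in Gauss, the energy density and the magnetic pressure are
the same CGS quantity, B^2/(8 pi), which is why MagneticPressure simply
aliases MagneticEnergy. For a typical microgauss ISM field (quick check,
not part of the commit):

    import numpy as np

    B = 1.0e-6                    # Gauss
    P_B = B ** 2 / (8.0 * np.pi)  # erg cm^-3
    print "%.2e erg/cm^3" % P_B   # ~4.0e-14
    # PlasmaBeta is then the thermal Pressure divided by this.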
+
+def _BPoloidal(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    theta = np.tile(data['sph_theta'], (3, 1)).transpose()
+    phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
+
+    return get_sph_theta_component(Bfields, theta, phi, normal)
+
+add_field("BPoloidal", function=_BPoloidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
+def _BToroidal(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
+
+    return get_sph_phi_component(Bfields, phi, normal)
+
+add_field("BToroidal", function=_BToroidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
+def _BRadial(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    theta = np.tile(data['sph_theta'], (3, 1)).transpose()
+    phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
+
+    return get_sph_r_component(Bfields, theta, phi, normal)
+
+add_field("BRadial", function=_BPoloidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
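
The get_sph_*_component helpers project a Cartesian vector field onto the
local spherical basis; for a normal along z they presumably reduce to the
standard unit vectors, as in this sketch (not the library's actual source):

    import numpy as np

    def sph_unit_vectors(theta, phi):
        # (r_hat, theta_hat, phi_hat) for normal = z_hat
        r_hat = np.array([np.sin(theta) * np.cos(phi),
                          np.sin(theta) * np.sin(phi),
                          np.cos(theta)])
        theta_hat = np.array([np.cos(theta) * np.cos(phi),
                              np.cos(theta) * np.sin(phi),
                              -np.sin(theta)])
        phi_hat = np.array([-np.sin(phi), np.cos(phi), 0.0])
        return r_hat, theta_hat, phi_hat

    # a purely toroidal field has no radial or poloidal part:
    r_hat, theta_hat, phi_hat = sph_unit_vectors(np.pi / 2, 0.0)
    B = 3.0 * phi_hat
    print np.dot(B, r_hat), np.dot(B, theta_hat), np.dot(B, phi_hat)
    # 0.0 0.0 3.0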
 
 def _VorticitySquared(field, data):
     mylog.debug("Generating vorticity on %s", data)
@@ -857,7 +1064,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["x-velocity"].shape)
+    new_field = np.zeros(data["x-velocity"].shape)
     dvzdy = (data["z-velocity"][1:-1,sl_right,1:-1] -
              data["z-velocity"][1:-1,sl_left,1:-1]) \
              / (div_fac*data["dy"].flat[0])
@@ -882,7 +1089,7 @@
              / (div_fac*data["dy"].flat[0])
     new_field[1:-1,1:-1,1:-1] += (dvydx - dvxdy)**2.0
     del dvydx, dvxdy
-    new_field = na.abs(new_field)
+    new_field = np.abs(new_field)
     return new_field
 def _convertVorticitySquared(data):
     return data.convert("cm")**-2.0
@@ -902,7 +1109,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dx'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][sl_right,1:-1,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][sl_left ,1:-1,1:-1]/ds
@@ -917,7 +1124,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dy'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,sl_right,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,sl_left ,1:-1]/ds
@@ -932,7 +1139,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dz'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,1:-1,sl_right]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,1:-1,sl_left ]/ds
@@ -947,7 +1154,7 @@
               units=r"\rm{dyne}/\rm{cm}^{3}")
 
 def _gradPressureMagnitude(field, data):
-    return na.sqrt(data["gradPressureX"]**2 +
+    return np.sqrt(data["gradPressureX"]**2 +
                    data["gradPressureY"]**2 +
                    data["gradPressureZ"]**2)
 add_field("gradPressureMagnitude", function=_gradPressureMagnitude,
@@ -964,7 +1171,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dx'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][sl_right,1:-1,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][sl_left ,1:-1,1:-1]/ds
@@ -979,7 +1186,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dy'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,sl_right,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,sl_left ,1:-1]/ds
@@ -994,7 +1201,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dz'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,1:-1,sl_right]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,1:-1,sl_left ]/ds
@@ -1009,7 +1216,7 @@
               units=r"\rm{g}/\rm{cm}^{4}")
 
 def _gradDensityMagnitude(field, data):
-    return na.sqrt(data["gradDensityX"]**2 +
+    return np.sqrt(data["gradDensityX"]**2 +
                    data["gradDensityY"]**2 +
                    data["gradDensityZ"]**2)
 add_field("gradDensityMagnitude", function=_gradDensityMagnitude,
@@ -1035,7 +1242,7 @@
           units=r"\rm{s}^{-1}")
 
 def _BaroclinicVorticityMagnitude(field, data):
-    return na.sqrt(data["BaroclinicVorticityX"]**2 +
+    return np.sqrt(data["BaroclinicVorticityX"]**2 +
                    data["BaroclinicVorticityY"]**2 +
                    data["BaroclinicVorticityZ"]**2)
 add_field("BaroclinicVorticityMagnitude",
@@ -1053,7 +1260,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["z-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["z-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["z-velocity"][1:-1,sl_right,1:-1] -
                                  data["z-velocity"][1:-1,sl_left,1:-1]) \
                                  / (div_fac*data["dy"].flat[0])
@@ -1071,7 +1278,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["z-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["z-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["x-velocity"][1:-1,1:-1,sl_right] -
                                  data["x-velocity"][1:-1,1:-1,sl_left]) \
                                  / (div_fac*data["dz"].flat[0])
@@ -1089,7 +1296,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["x-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["x-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["y-velocity"][sl_right,1:-1,1:-1] -
                                  data["y-velocity"][sl_left,1:-1,1:-1]) \
                                  / (div_fac*data["dx"].flat[0])
@@ -1108,7 +1315,7 @@
               units=r"\rm{s}^{-1}")
 
 def _VorticityMagnitude(field, data):
-    return na.sqrt(data["VorticityX"]**2 +
+    return np.sqrt(data["VorticityX"]**2 +
                    data["VorticityY"]**2 +
                    data["VorticityZ"]**2)
 add_field("VorticityMagnitude", function=_VorticityMagnitude,
@@ -1127,7 +1334,7 @@
     add_field(n, function=eval("_%s" % n),
               validators=[ValidateSpatial(0)])
 def _VorticityStretchingMagnitude(field, data):
-    return na.sqrt(data["VorticityStretchingX"]**2 +
+    return np.sqrt(data["VorticityStretchingX"]**2 +
                    data["VorticityStretchingY"]**2 +
                    data["VorticityStretchingZ"]**2)
 add_field("VorticityStretchingMagnitude", 
@@ -1149,13 +1356,13 @@
                           ["x-velocity", "y-velocity", "z-velocity"])],
               units=r"\rm{s}^{-2}")
 def _VorticityGrowthMagnitude(field, data):
-    result = na.sqrt(data["VorticityGrowthX"]**2 +
+    result = np.sqrt(data["VorticityGrowthX"]**2 +
                      data["VorticityGrowthY"]**2 +
                      data["VorticityGrowthZ"]**2)
-    dot = na.zeros(result.shape)
+    dot = np.zeros(result.shape)
     for ax in "XYZ":
         dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
-    result = na.sign(dot) * result
+    result = np.sign(dot) * result
     return result
 add_field("VorticityGrowthMagnitude", function=_VorticityGrowthMagnitude,
           validators=[ValidateSpatial(1, 
@@ -1163,7 +1370,7 @@
           units=r"\rm{s}^{-1}",
           take_log=False)
 def _VorticityGrowthMagnitudeABS(field, data):
-    return na.sqrt(data["VorticityGrowthX"]**2 +
+    return np.sqrt(data["VorticityGrowthX"]**2 +
                    data["VorticityGrowthY"]**2 +
                    data["VorticityGrowthZ"]**2)
 add_field("VorticityGrowthMagnitudeABS", function=_VorticityGrowthMagnitudeABS,
@@ -1175,7 +1382,7 @@
     domegax_dt = data["VorticityX"] / data["VorticityGrowthX"]
     domegay_dt = data["VorticityY"] / data["VorticityGrowthY"]
     domegaz_dt = data["VorticityZ"] / data["VorticityGrowthZ"]
-    return na.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt)
+    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt)
 add_field("VorticityGrowthTimescale", function=_VorticityGrowthTimescale,
           validators=[ValidateSpatial(1, 
                       ["x-velocity", "y-velocity", "z-velocity"])],
@@ -1208,7 +1415,7 @@
               units=r"\rm{s}^{-1}")
 
 def _VorticityRadPressureMagnitude(field, data):
-    return na.sqrt(data["VorticityRadPressureX"]**2 +
+    return np.sqrt(data["VorticityRadPressureX"]**2 +
                    data["VorticityRadPressureY"]**2 +
                    data["VorticityRadPressureZ"]**2)
 add_field("VorticityRadPressureMagnitude",
@@ -1233,13 +1440,13 @@
                        ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
               units=r"\rm{s}^{-1}")
 def _VorticityRPGrowthMagnitude(field, data):
-    result = na.sqrt(data["VorticityRPGrowthX"]**2 +
+    result = np.sqrt(data["VorticityRPGrowthX"]**2 +
                      data["VorticityRPGrowthY"]**2 +
                      data["VorticityRPGrowthZ"]**2)
-    dot = na.zeros(result.shape)
+    dot = np.zeros(result.shape)
     for ax in "XYZ":
         dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
-    result = na.sign(dot) * result
+    result = np.sign(dot) * result
     return result
 add_field("VorticityRPGrowthMagnitude", function=_VorticityGrowthMagnitude,
           validators=[ValidateSpatial(1, 
@@ -1247,7 +1454,7 @@
           units=r"\rm{s}^{-1}",
           take_log=False)
 def _VorticityRPGrowthMagnitudeABS(field, data):
-    return na.sqrt(data["VorticityRPGrowthX"]**2 +
+    return np.sqrt(data["VorticityRPGrowthX"]**2 +
                    data["VorticityRPGrowthY"]**2 +
                    data["VorticityRPGrowthZ"]**2)
 add_field("VorticityRPGrowthMagnitudeABS", 
@@ -1260,7 +1467,7 @@
     domegax_dt = data["VorticityX"] / data["VorticityRPGrowthX"]
     domegay_dt = data["VorticityY"] / data["VorticityRPGrowthY"]
     domegaz_dt = data["VorticityZ"] / data["VorticityRPGrowthZ"]
-    return na.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
+    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
 add_field("VorticityRPGrowthTimescale", function=_VorticityRPGrowthTimescale,
           validators=[ValidateSpatial(1, 
                       ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/_skeleton/__init__.py
--- /dev/null
+++ b/yt/frontends/_skeleton/__init__.py
@@ -0,0 +1,25 @@
+"""
+API for yt.frontends._skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/_skeleton/api.py
--- /dev/null
+++ b/yt/frontends/_skeleton/api.py
@@ -0,0 +1,37 @@
+"""
+API for yt.frontends._skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .data_structures import \
+      SkeletonGrid, \
+      SkeletonHierarchy, \
+      SkeletonStaticOutput
+
+from .fields import \
+      SkeletonFieldInfo, \
+      add_skeleton_field
+
+from .io import \
+      IOHandlerSkeleton


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/_skeleton/data_structures.py
--- /dev/null
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -0,0 +1,157 @@
+"""
+Skeleton data structures
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import h5py
+import stat
+import numpy as np
+import weakref
+
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.data_objects.hierarchy import \
+    AMRHierarchy
+from yt.data_objects.static_output import \
+    StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.io_handler import \
+    io_registry
+from yt.utilities.physical_constants import cm_per_mpc
+from .fields import SkeletonFieldInfo, add_skeleton_field, KnownSkeletonFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc, ValidateDataField, TranslationFunc
+
+class SkeletonGrid(AMRGridPatch):
+    _id_offset = 0
+    #__slots__ = ["_level_id", "stop_index"]
+    def __init__(self, id, hierarchy, level):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = None
+        self.Children = []
+        self.Level = level
+
+    def __repr__(self):
+        return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+
+class SkeletonHierarchy(AMRHierarchy):
+
+    grid = SkeletonGrid
+    float_type = np.float64
+    
+    def __init__(self, pf, data_style='skeleton'):
+        self.data_style = data_style
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        AMRHierarchy.__init__(self, pf, data_style)
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        # This needs to set a self.field_list that contains all the available,
+        # on-disk fields.
+        pass
+    
+    def _count_grids(self):
+        # This needs to set self.num_grids
+        pass
+        
+    def _parse_hierarchy(self):
+        # This needs to fill the following arrays, where N is self.num_grids:
+        #   self.grid_left_edge         (N, 3) <= float64
+        #   self.grid_right_edge        (N, 3) <= float64
+        #   self.grid_dimensions        (N, 3) <= int
+        #   self.grid_particle_count    (N, 1) <= int
+        #   self.grid_levels            (N, 1) <= int
+        #   self.grids                  (N, 1) <= grid objects
+        #   
+        pass
+                        
+    def _populate_grid_objects(self):
+        # For each grid, this must call:
+        #   grid._prepare_grid()
+        #   grid._setup_dx()
+        # This must also set:
+        #   grid.Children <= list of child grids
+        #   grid.Parent   <= parent grid
+        # This is handled by the frontend because often the children must be
+        # identified.
+        pass
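
For the simplest possible format, a single static grid spanning the whole
domain, the methods above collapse to something like this (a sketch under
that assumption, not a working frontend):

    def _count_grids(self):
        self.num_grids = 1

    def _parse_hierarchy(self):
        self.grid_left_edge[0, :] = self.pf.domain_left_edge
        self.grid_right_edge[0, :] = self.pf.domain_right_edge
        self.grid_dimensions[0, :] = self.pf.domain_dimensions
        self.grid_particle_count[0, 0] = 0
        self.grid_levels[0, 0] = 0
        self.max_level = 0
        self.grids = np.empty(self.num_grids, dtype='object')
        self.grids[0] = self.grid(0, self, level=0)

    def _populate_grid_objects(self):
        g = self.grids[0]
        g.Parent = None
        g.Children = []
        g._prepare_grid()
        g._setup_dx()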
+
+class SkeletonStaticOutput(StaticOutput):
+    _hierarchy_class = SkeletonHierarchy
+    _fieldinfo_fallback = SkeletonFieldInfo
+    _fieldinfo_known = KnownSkeletonFields
+    _handle = None
+    
+    def __init__(self, filename, data_style='skeleton',
+                 storage_filename = None,
+                 conversion_override = None):
+
+        if conversion_override is None: conversion_override = {}
+        self._conversion_override = conversion_override
+
+        StaticOutput.__init__(self, filename, data_style)
+        self.storage_filename = storage_filename
+
+    def _set_units(self):
+        # This needs to set up the dictionaries that convert from code units to
+        # CGS.  The needed items are listed in the second entry:
+        #   self.time_units         <= sec_conversion
+        #   self.conversion_factors <= mpc_conversion
+        #   self.units              <= On-disk fields
+        pass
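
A code whose length unit is, say, one Mpc would fill those dictionaries
roughly like this (a sketch built on the mpc_conversion and sec_conversion
tables imported above):

    def _set_units(self):
        self.units = {}
        self.time_units = {}
        self.conversion_factors = {}
        for unit in mpc_conversion.keys():
            self.units[unit] = mpc_conversion[unit] / mpc_conversion["mpc"]
        for unit in sec_conversion.keys():
            self.time_units[unit] = 1.0 / sec_conversion[unit]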
+
+    def _parse_parameter_file(self):
+        # This needs to set up the following items:
+        #
+        #   self.unique_identifier
+        #   self.parameters             <= full of code-specific items of use
+        #   self.domain_left_edge       <= array of float64
+        #   self.domain_right_edge      <= array of float64
+        #   self.dimensionality         <= int
+        #   self.domain_dimensions      <= array of int64
+        #   self.current_time           <= simulation time in code units
+        #
+        # We also set up cosmological information.  Set these to zero if
+        # non-cosmological.
+        #
+        #   self.cosmological_simulation    <= int, 0 or 1
+        #   self.current_redshift           <= float
+        #   self.omega_lambda               <= float
+        #   self.omega_matter               <= float
+        #   self.hubble_constant            <= float
+        pass
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        # This accepts a filename or a set of arguments and returns True or
+        # False depending on if the file is of the type requested.
+        return False
+
+




diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/_skeleton/fields.py
--- /dev/null
+++ b/yt/frontends/_skeleton/fields.py
@@ -0,0 +1,102 @@
+"""
+Skeleton-specific fields
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+from yt.utilities.physical_constants import \
+    kboltz
+
+# The first field container is where any fields that exist on disk go, along
+# with their conversion factors, display names, etc.
+
+KnownSkeletonFields = FieldInfoContainer()
+add_skeleton_field = KnownSkeletonFields.add_field
+
+SkeletonFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = SkeletonFieldInfo.add_field
+
+# Often, we want to translate between fields on disk and fields in yt.  This
+# construct shows how to do that.  Note that we use TranslationFunc.
+
+translation_dict = {"x-velocity": "velx",
+                    "y-velocity": "vely",
+                    "z-velocity": "velz",
+                    "Density": "dens",
+                    "Temperature": "temp",
+                    "Pressure" : "pres", 
+                    "Grav_Potential" : "gpot",
+                    "particle_position_x" : "particle_posx",
+                    "particle_position_y" : "particle_posy",
+                    "particle_position_z" : "particle_posz",
+                    "particle_velocity_x" : "particle_velx",
+                    "particle_velocity_y" : "particle_vely",
+                    "particle_velocity_z" : "particle_velz",
+                    "particle_index" : "particle_tag",
+                    "Electron_Fraction" : "elec",
+                    "HI_Fraction" : "h   ",
+                    "HD_Fraction" : "hd  ",
+                    "HeI_Fraction": "hel ",
+                    "HeII_Fraction": "hep ",
+                    "HeIII_Fraction": "hepp",
+                    "HM_Fraction": "hmin",
+                    "HII_Fraction": "hp  ",
+                    "H2I_Fraction": "htwo",
+                    "H2II_Fraction": "htwp",
+                    "DI_Fraction": "deut",
+                    "DII_Fraction": "dplu",
+                    "ParticleMass": "particle_mass",
+                    "Flame_Fraction": "flam"}
+
+for f,v in translation_dict.items():
+    if v not in KnownSkeletonFields:
+        pfield = v.startswith("particle")
+        add_skeleton_field(v, function=NullFunc, take_log=False,
+                  validators = [ValidateDataField(v)],
+                  particle_type = pfield)
+    if f.endswith("_Fraction") :
+        dname = "%s\/Fraction" % f.split("_")[0]
+    else :
+        dname = f                    
+    ff = KnownSkeletonFields[v]
+    pfield = f.startswith("particle")
+    add_field(f, TranslationFunc(v),
+              take_log=KnownSkeletonFields[v].take_log,
+              units = ff._units, display_name=dname,
+              particle_type = pfield)
+
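TranslationFunc itself is imported from field_info_container; conceptually
it just builds a closure that reads the on-disk alias, along these lines
(a sketch of the idea, not yt's actual source):

    def TranslationFunc(field_name):
        def _translate(field, data):
            return data[field_name]   # fetch the aliased on-disk field
        return _translate
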
+# Here's an example of adding a new field (with the _get_convert
+# closure the FLASH frontend uses for convert functions):
+def _get_convert(fname):
+    def _conv(data):
+        return data.convert(fname)
+    return _conv
+
+add_skeleton_field("dens", function=NullFunc, take_log=True,
+                convert_function=_get_convert("dens"),
+                units=r"\rm{g}/\rm{cm}^3")


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/_skeleton/io.py
--- /dev/null
+++ b/yt/frontends/_skeleton/io.py
@@ -0,0 +1,44 @@
+"""
+Skeleton-specific IO functions
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+import h5py
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+class IOHandlerSkeleton(BaseIOHandler):
+    _particle_reader = False
+    _data_style = "skeleton"
+
+    def _read_data_set(self, grid, field):
+        # This must return the array, of size/shape grid.ActiveDimensions, that
+        # corresponds to 'field'.
+        pass
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        # If this is not implemented, the IO handler will just slice a
+        # _read_data_set item.
+        pass
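
Filled in for a format that keeps one HDF5 dataset per field at the file
root, _read_data_set might look roughly like this (a sketch; the layout is
a made-up example):

    def _read_data_set(self, grid, field):
        f = h5py.File(grid.filename, "r")
        data = f["/%s" % field][:].astype("float64")
        f.close()
        return data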




diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/_skeleton/setup.py
--- /dev/null
+++ b/yt/frontends/_skeleton/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('skeleton', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import stat
 import weakref
 import cPickle
@@ -106,7 +106,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -120,10 +120,10 @@
             return self.start_index
         if len(self.Parent) == 0:
             start_index = self.LeftEdge / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+                       np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -141,7 +141,7 @@
         #for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self.float_type = na.float64
+        self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
         self._setup_field_list()
         
@@ -180,9 +180,9 @@
                           self.pf.child_grid_offset,
                           self.pf.min_level, self.pf.max_level)
         self.pf.level_info[0]=self.pf.ncell
-        self.pf.level_info = na.array(self.pf.level_info)        
+        self.pf.level_info = np.array(self.pf.level_info)        
         self.pf.level_offsets = self.pf.level_child_offsets
-        self.pf.level_offsets = na.array(self.pf.level_offsets, dtype='int64')
+        self.pf.level_offsets = np.array(self.pf.level_offsets, dtype='int64')
         self.pf.level_offsets[0] = self.pf.root_grid_offset
         
         self.pf.level_art_child_masks = {}
@@ -192,10 +192,10 @@
         del cm
         
         root_psg = _ramses_reader.ProtoSubgrid(
-                        na.zeros(3, dtype='int64'), # left index of PSG
+                        np.zeros(3, dtype='int64'), # left index of PSG
                         self.pf.domain_dimensions, # dim of PSG
-                        na.zeros((1,3), dtype='int64'), # left edges of grids
-                        na.zeros((1,6), dtype='int64') # empty
+                        np.zeros((1,3), dtype='int64'), # left edges of grids
+                        np.zeros((1,6), dtype='int64') # empty
                         )
         
         self.proto_grids = [[root_psg],]
@@ -224,8 +224,8 @@
             #compute the hilbert indices up to a certain level
             #the indices will associate an oct grid to the nearest
             #hilbert index?
-            base_level = int( na.log10(self.pf.domain_dimensions.max()) /
-                              na.log10(2))
+            base_level = int( np.log10(self.pf.domain_dimensions.max()) /
+                              np.log10(2))
             hilbert_indices = _ramses_reader.get_hilbert_indices(
                                     level + base_level, left_index)
             #print base_level, hilbert_indices.max(),
@@ -234,7 +234,7 @@
             
             # Strictly speaking, we don't care about the index of any
             # individual oct at this point.  So we can then split them up.
-            unique_indices = na.unique(hilbert_indices)
+            unique_indices = np.unique(hilbert_indices)
             mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
                         level, unique_indices.size, hilbert_indices.size)
             
@@ -260,15 +260,15 @@
                 #why would we ever have non-unique octs?
                 #perhaps the hilbert ordering may visit the same
                 #oct multiple times - review only unique octs 
-                #for idomain in na.unique(ddfl[:,1]):
+                #for idomain in np.unique(ddfl[:,1]):
                 #dom_ind = ddfl[:,1] == idomain
                 #dleft_index = ddleft_index[dom_ind,:]
                 #dfl = ddfl[dom_ind,:]
                 
                 dleft_index = ddleft_index
                 dfl = ddfl
-                initial_left = na.min(dleft_index, axis=0)
-                idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                initial_left = np.min(dleft_index, axis=0)
+                idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
                 #this creates a grid patch that doesn't cover the whole level
                 #necessarily, but with other patches covers all the regions
                 #with octs. This object automatically shrinks its size
@@ -298,8 +298,8 @@
                 
                 step+=1
                 pbar.update(step)
-            eff_mean = na.mean(psg_eff)
-            eff_nmin = na.sum([e<=min_eff*tol for e in psg_eff])
+            eff_mean = np.mean(psg_eff)
+            eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
             eff_nall = len(psg_eff)
             mylog.info("Average subgrid efficiency %02.1f %%",
                         eff_mean*100.0)
@@ -345,14 +345,14 @@
                 self.grid_right_edge[gi,:] = props[1,:] / dds
                 self.grid_dimensions[gi,:] = props[2,:]
                 self.grid_levels[gi,:] = level
-                child_mask = na.zeros(props[2,:],'uint8')
+                child_mask = np.zeros(props[2,:],'uint8')
                 amr_utils.fill_child_mask(fl,props[0],
                     self.pf.level_art_child_masks[level],
                     child_mask)
                 grids.append(self.grid(gi, self, level, fl, 
-                    props*na.array(correction).astype('int64')))
+                    props*np.array(correction).astype('int64')))
                 gi += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         
 
         if self.pf.file_particle_data:
@@ -372,7 +372,7 @@
             pbar.update(1)
             npa,npb=0,0
             npb = lspecies[-1]
-            clspecies = na.concatenate(([0,],lspecies))
+            clspecies = np.concatenate(([0,],lspecies))
             if self.pf.only_particle_type is not None:
                 npb = lspecies[0]
                 if type(self.pf.only_particle_type)==type(5):
@@ -388,13 +388,13 @@
             self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
             self.pf.particle_velocity  *= uv #to proper cm/s
             pbar.update(4)
-            self.pf.particle_type         = na.zeros(np,dtype='uint8')
-            self.pf.particle_mass         = na.zeros(np,dtype='float64')
-            self.pf.particle_mass_initial = na.zeros(np,dtype='float64')-1
-            self.pf.particle_creation_time= na.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity1 = na.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity2 = na.zeros(np,dtype='float64')-1
-            self.pf.particle_age          = na.zeros(np,dtype='float64')-1
+            self.pf.particle_type         = np.zeros(np,dtype='uint8')
+            self.pf.particle_mass         = np.zeros(np,dtype='float64')
+            self.pf.particle_mass_initial = np.zeros(np,dtype='float64')-1
+            self.pf.particle_creation_time= np.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity1 = np.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity2 = np.zeros(np,dtype='float64')-1
+            self.pf.particle_age          = np.zeros(np,dtype='float64')-1
             
             dist = self.pf['cm']/self.pf.domain_dimensions[0]
             self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
@@ -461,17 +461,17 @@
             init = self.pf.particle_position.shape[0]
             pos = self.pf.particle_position
             #particle indices travel with the particle positions
-            #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
+            #pos = np.vstack((np.arange(pos.shape[0]),pos.T)).T 
             if type(self.pf.grid_particles) == type(5):
                 particle_level = min(self.pf.max_level,self.pf.grid_particles)
             else:
                 particle_level = 2
-            grid_particle_count = na.zeros((len(grids),1),dtype='int64')
+            grid_particle_count = np.zeros((len(grids),1),dtype='int64')
 
             pbar = get_pbar("Gridding Particles ",init)
             assignment,ilists = amr_utils.assign_particles_to_cell_lists(
                     self.grid_levels.ravel().astype('int32'),
-                    na.zeros(len(pos[:,0])).astype('int32')-1,
+                    np.zeros(len(pos[:,0])).astype('int32')-1,
                    particle_level, #don't grid particles past this
                     self.grid_left_edge.astype('float32'),
                     self.grid_right_edge.astype('float32'),
@@ -500,10 +500,10 @@
             
 
     def _get_grid_parents(self, grid, LE, RE):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(LE, RE)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level-1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level-1)).flat)
         return self.grids[mask]
 
     def _populate_grid_objects(self):
@@ -519,7 +519,7 @@
         self.max_level = self.grid_levels.max()
 
     # def _populate_grid_objects(self):
-    #     mask = na.empty(self.grids.size, dtype='int32')
+    #     mask = np.empty(self.grids.size, dtype='int32')
     #     pb = get_pbar("Populating grids", len(self.grids))
     #     for gi,g in enumerate(self.grids):
     #         pb.update(gi)
@@ -609,7 +609,7 @@
         self.single_particle_mass = single_particle_mass
         
         if limit_level is None:
-            self.limit_level = na.inf
+            self.limit_level = np.inf
         else:
             limit_level = int(limit_level)
             mylog.info("Using maximum level: %i",limit_level)
@@ -685,7 +685,7 @@
         wmu = self["wmu"]
         #ng = self.domain_dimensions[0]
         #r0 = self["cmh"]/ng # comoving cm h^-1
-        #t0 = 6.17e17/(self.hubble_constant + na.sqrt(self.omega_matter))
+        #t0 = 6.17e17/(self.hubble_constant + np.sqrt(self.omega_matter))
         #v0 = r0 / t0
         #rho0 = 1.8791e-29 * self.hubble_constant**2.0 * self.omega_matter
         #e0 = v0**2.0
@@ -696,7 +696,7 @@
         hubble = self.hubble_constant
         ng = self.domain_dimensions[0]
         self.r0 = boxh/ng
-        self.v0 =  self.r0 * 50.0*1.0e5 * na.sqrt(self.omega_matter)  #cm/s
+        self.v0 =  self.r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
         self.t0 = self.r0/self.v0
         # this is 3H0^2 / (8pi*G) *h*Omega0 with H0=100km/s. 
         # ie, critical density 
@@ -730,8 +730,8 @@
     def _parse_parameter_file(self):
         # We set our domain to run from 0 .. 1 since we are otherwise
         # unconstrained.
-        self.domain_left_edge = na.zeros(3, dtype="float64")
-        self.domain_right_edge = na.ones(3, dtype="float64")
+        self.domain_left_edge = np.zeros(3, dtype="float64")
+        self.domain_right_edge = np.ones(3, dtype="float64")
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
         self.parameters = {}
@@ -812,10 +812,10 @@
         self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
         #self.hubble_time /= 3.168876e7 #Gyr in s 
         # def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
-        #     return 1./(x*na.sqrt(oml+omb*x**-3.0))
-        # spacings = na.logspace(-5,na.log10(self.parameters['aexpn']),1e5)
+        #     return 1./(x*np.sqrt(oml+omb*x**-3.0))
+        # spacings = np.logspace(-5,np.log10(self.parameters['aexpn']),1e5)
         # integrand_arr = integrand(spacings)
-        # self.current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+        # self.current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
         # self.current_time *= self.hubble_time
         self.current_time = b2t(self.current_time_raw) * sec_per_Gyr
         for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
@@ -824,7 +824,7 @@
         
         Om0 = self.parameters['Om0']
         hubble = self.parameters['hubble']
-        dummy = 100.0 * hubble * na.sqrt(Om0)
+        dummy = 100.0 * hubble * np.sqrt(Om0)
         ng = self.parameters['ng']
         wmu = self.parameters["wmu"]
         boxh = header_vals['boxh'] 
@@ -836,7 +836,7 @@
         self.parameters["t0"] = 2.0 / dummy * 3.0856e19 / 3.15e7
         #velocity units in km/s
         self.parameters["v0"] = 50.0*self.parameters["r0"]*\
-                na.sqrt(self.parameters["Om0"])
+                np.sqrt(self.parameters["Om0"])
         #density = 3H0^2 * Om0 / (8*pi*G) - unit of density in Msun/Mpc^3
         self.parameters["rho0"] = 2.776e11 * hubble**2.0 * Om0
         rho0 = self.parameters["rho0"]
@@ -857,10 +857,10 @@
     
         (self.ncell,) = struct.unpack('>l', _read_record(f))
         # Try to figure out the root grid dimensions
-        est = int(na.rint(self.ncell**(1.0/3.0)))
+        est = int(np.rint(self.ncell**(1.0/3.0)))
         # Note here: this is the number of *cells* on the root grid.
         # This is not the same as the number of Octs.
-        self.domain_dimensions = na.ones(3, dtype='int64')*est 
+        self.domain_dimensions = np.ones(3, dtype='int64')*est 
 
         self.root_grid_mask_offset = f.tell()
         #_skip_record(f) # iOctCh
@@ -927,8 +927,8 @@
         seek_extras = 137
         fh.seek(seek_extras)
         n = self.parameters['Nspecies']
-        self.parameters['wspecies'] = na.fromfile(fh,dtype='>f',count=10)
-        self.parameters['lspecies'] = na.fromfile(fh,dtype='>i',count=10)
+        self.parameters['wspecies'] = np.fromfile(fh,dtype='>f',count=10)
+        self.parameters['lspecies'] = np.fromfile(fh,dtype='>i',count=10)
         self.parameters['wspecies'] = self.parameters['wspecies'][:n]
         self.parameters['lspecies'] = self.parameters['lspecies'][:n]
         fh.close()


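Most of the hunks in this changeset are a mechanical rename of the numpy import alias from `na` to `np`. One thing worth watching in a blanket rewrite like this: the particle block above already passes a local integer named `np` into `np.zeros(np, dtype='uint8')`, so the new alias collides with an existing variable name. A rough sketch of how such a rename could be scripted with a crude shadowing check (an illustration, not the tooling actually used):

    import re
    import sys

    IMPORT_RE = re.compile(r"\bimport\s+numpy\s+as\s+na\b")
    ALIAS_RE = re.compile(r"\bna\.")                           # uses of the old alias
    SHADOW_RE = re.compile(r"^\s*np\s*[,=]|[(,]\s*np\s*[,)]")  # crude local-`np` detector

    def rename_alias(text):
        # Rewrite 'import numpy as na' and 'na.' uses to the 'np' alias.
        return ALIAS_RE.sub("np.", IMPORT_RE.sub("import numpy as np", text))

    if __name__ == "__main__":
        for path in sys.argv[1:]:
            with open(path) as f:
                src = f.read()
            # Warn about lines that already use 'np' as a plain name.
            for lineno, line in enumerate(src.splitlines(), 1):
                if SHADOW_RE.search(line):
                    print("%s:%d: possible local 'np' shadowing numpy" % (path, lineno))
            with open(path, "w") as f:
                f.write(rename_alias(src))
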
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -44,7 +44,7 @@
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = ARTFieldInfo.add_field
 
-import numpy as na
+import numpy as np
 
 #these are just the hydro fields
 known_art_fields = [ 'Density','TotalEnergy',
@@ -178,7 +178,7 @@
     di = dd==0.0
     #dd[di] = -1.0
     tr = dg/dd
-    #tr[na.isnan(tr)] = 0.0
+    #tr[np.isnan(tr)] = 0.0
     #if data.id==460:
     #    import pdb;pdb.set_trace()
     tr /= data.pf.conversion_factors["GasEnergy"]
@@ -186,7 +186,7 @@
     tr *= data.pf.tr
     #tr[di] = -1.0 #replace the zero-density points with zero temp
     #print tr.min()
-    #assert na.all(na.isfinite(tr))
+    #assert np.all(np.isfinite(tr))
     return tr
 def _converttemperature(data):
     x = data.pf.conversion_factors["Temperature"]
@@ -258,9 +258,9 @@
     #make a dumb assumption that the mass is evenly spread out in the grid
     #must return an array the shape of the grid cells
     tr  = data["Ones"] #create a grid in the right size
-    if na.sum(idx)>0:
-        tr /= na.prod(tr.shape) #divide by the volume
-        tr *= na.sum(data['particle_mass'][idx]) #Multiply by total contaiend mass
+    if np.sum(idx)>0:
+        tr /= np.prod(tr.shape) #divide by the number of cells
+        tr *= np.sum(data['particle_mass'][idx]) #multiply by total contained mass
         return tr
     else:
         return tr*0.0


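The last hunk above spreads the total mass of the particles selected by `idx` evenly over every cell of the grid. A standalone sketch of that deposition (the array names here are local to the example):

    import numpy as np

    def uniform_particle_mass(grid_shape, particle_mass, idx):
        # Spread the summed mass of the selected particles evenly over
        # every cell of a grid with shape `grid_shape`.
        tr = np.ones(grid_shape)              # stand-in for data["Ones"]
        if np.sum(idx) > 0:
            tr /= np.prod(tr.shape)           # divide by the number of cells
            tr *= np.sum(particle_mass[idx])  # multiply by total contained mass
            return tr
        return tr * 0.0

    # Eight particles of unit mass, four of them inside this 4x4x4 grid:
    mass = np.ones(8)
    inside = np.array([True] * 4 + [False] * 4)
    cells = uniform_particle_mass((4, 4, 4), mass, inside)
    assert np.isclose(cells.sum(), 4.0)       # deposited mass is conserved
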
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import struct
 
 import os
@@ -93,9 +93,9 @@
         f.seek(self.level_offsets[level])
         ncells = 8*self.level_info[level]
         nvals = ncells * (self.nhydro_vars + 6) # 2 vars, 2 pads
-        arr = na.fromfile(f, dtype='>f', count=nvals)
+        arr = np.fromfile(f, dtype='>f', count=nvals)
         arr = arr.reshape((self.nhydro_vars+6, ncells), order="F")
-        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         arr = arr[3:-1,:] #skip beginning pad, idc, iOctCh, + ending pad
         if field==None:
             self.level_data[level] = arr.astype('float32')
@@ -108,13 +108,13 @@
         f.seek(self.level_offsets[0] + 4) # Ditch the header
         ncells = self.level_info[0]
         nhvals = ncells * (self.nhydro_vars) # 0 vars, 0 pads
-        hvar = na.fromfile(f, dtype='>f', count=nhvals).astype("float32")
+        hvar = np.fromfile(f, dtype='>f', count=nhvals).astype("float32")
         hvar = hvar.reshape((self.nhydro_vars, ncells), order="F")
-        na.fromfile(f,dtype='>i',count=2) #throw away the pads
+        np.fromfile(f,dtype='>i',count=2) #throw away the pads
         nvars = ncells * (2) # 0 vars, 0 pads
-        var = na.fromfile(f, dtype='>f', count=nvars).astype("float32")
+        var = np.fromfile(f, dtype='>f', count=nvars).astype("float32")
         var = var.reshape((2, ncells), order="F")
-        arr = na.concatenate((hvar,var))
+        arr = np.concatenate((hvar,var))
         self.level_data[0] = arr
 
     def clear_level(self, level):
@@ -122,9 +122,9 @@
 
     def _read_particle_field(self, grid, field):
         #This will be cleaned up later
-        idx = na.array(grid.particle_indices)
+        idx = np.array(grid.particle_indices)
         if field == 'particle_index':
-            return na.array(idx)
+            return np.array(idx)
         if field == 'particle_type':
             return grid.pf.particle_type[idx]
         if field == 'particle_position_x':
@@ -168,10 +168,10 @@
             tr = self.level_data[0][field_id,:].reshape(
                     pf.domain_dimensions, order="F").copy()
             return tr.swapaxes(0, 2).astype("float64")
-        tr = na.zeros(grid.ActiveDimensions, dtype='float32')
+        tr = np.zeros(grid.ActiveDimensions, dtype='float32')
         grids = [grid]
         l_delta = 0
-        filled = na.zeros(grid.ActiveDimensions, dtype='uint8')
+        filled = np.zeros(grid.ActiveDimensions, dtype='uint8')
         to_fill = grid.ActiveDimensions.prod()
         while to_fill > 0 and len(grids) > 0:
             next_grids = []
@@ -198,9 +198,9 @@
     level_child_offsets= [0,]
     f.seek(offset)
     nchild,ntot=8,0
-    Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
     for Lev in xrange(MinLev + 1, MaxLevelNow+1):
         level_oct_offsets.append(f.tell())
 
@@ -242,20 +242,20 @@
     #fortran indices start at 1
     
     #Skip all the oct hierarchy data
-    le     = na.zeros((nLevel,3),dtype='int64')
-    fl     = na.ones((nLevel,6),dtype='int64')
-    iocts  = na.zeros(nLevel+1,dtype='int64')
+    le     = np.zeros((nLevel,3),dtype='int64')
+    fl     = np.ones((nLevel,6),dtype='int64')
+    iocts  = np.zeros(nLevel+1,dtype='int64')
     idxa,idxb = 0,0
     chunk = long(1e6) #this is ~111MB per chunk for a 15-column 64-bit array
     left = nLevel
     while left > 0 :
         this_chunk = min(chunk,left)
         idxb=idxa+this_chunk
-        data = na.fromfile(f,dtype='>i',count=this_chunk*15)
+        data = np.fromfile(f,dtype='>i',count=this_chunk*15)
         data=data.reshape(this_chunk,15)
         left-=this_chunk
         le[idxa:idxb,:] = data[:,1:4]
-        fl[idxa:idxb,1] = na.arange(idxa,idxb)
+        fl[idxa:idxb,1] = np.arange(idxa,idxb)
         #pad byte is last, LL2, then ioct right before it
         iocts[idxa:idxb] = data[:,-3] 
         idxa=idxa+this_chunk
@@ -272,12 +272,12 @@
     #now correct iocts for fortran indices start @ 1
     iocts = iocts-1
 
-    assert na.unique(iocts).shape[0] == nLevel
+    assert np.unique(iocts).shape[0] == nLevel
     
     #ioct tries to access arrays much larger than le & fl
     #just make sure they appear in the right order, skipping
     #the empty space in between
-    idx = na.argsort(iocts)
+    idx = np.argsort(iocts)
     
     #now rearrange le & fl in order of the ioct
     le = le[idx]
@@ -294,7 +294,7 @@
     #now read the hvars and vars arrays
     #we are looking for iOctCh
     #we record if iOctCh is >0, in which it is subdivided
-    iOctCh  = na.zeros((nLevel+1,8),dtype='bool')
+    iOctCh  = np.zeros((nLevel+1,8),dtype='bool')
     
     
     
@@ -309,9 +309,9 @@
     np_per_page = Nrow**2 # defined in ART a_setup.h
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
 
-    f = na.fromfile(file, dtype='>f4').astype('float32') # direct access
-    pages = na.vsplit(na.reshape(f, (num_pages, words, np_per_page)), num_pages)
-    data = na.squeeze(na.dstack(pages)).T # x,y,z,vx,vy,vz
+    f = np.fromfile(file, dtype='>f4').astype('float32') # direct access
+    pages = np.vsplit(np.reshape(f, (num_pages, words, np_per_page)), num_pages)
+    data = np.squeeze(np.dstack(pages)).T # x,y,z,vx,vy,vz
     return data[:,0:3],data[:,3:]
 
 def read_stars(file,nstars,Nrow):
@@ -332,8 +332,8 @@
 def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
     f.seek(level_child_offsets[level])
     nvals = nLevel * (nhydro_vars + 6) # 2 vars, 2 pads
-    ioctch = na.zeros(nLevel,dtype='uint8')
-    idc = na.zeros(nLevel,dtype='int32')
+    ioctch = np.zeros(nLevel,dtype='uint8')
+    idc = np.zeros(nLevel,dtype='int32')
     
     chunk = long(1e6)
     left = nLevel
@@ -342,9 +342,9 @@
     while left > 0:
         chunk = min(chunk,left)
         b += chunk
-        arr = na.fromfile(f, dtype='>i', count=chunk*width)
+        arr = np.fromfile(f, dtype='>i', count=chunk*width)
         arr = arr.reshape((width, chunk), order="F")
-        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         idc[a:b]    = arr[1,:]-1 #fix fortran indexing
         ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined info available
         #zero in the mask means there is refinement available
@@ -354,12 +354,12 @@
     return idc,ioctch
     
 nchem=8+2
-dtyp = na.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
+dtyp = np.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
                 ",>%sf4"%(2)+",>i4")
 def _read_art_child(f, level_child_offsets,level,nLevel,field):
     pos=f.tell()
     f.seek(level_child_offsets[level])
-    arr = na.fromfile(f, dtype='>f', count=nLevel * 8)
+    arr = np.fromfile(f, dtype='>f', count=nLevel * 8)
     arr = arr.reshape((nLevel,16), order="F")
     arr = arr[3:-1,:].astype("float64")
     f.seek(pos)
@@ -372,8 +372,8 @@
 
 def _read_frecord(f,fmt):
     s1 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
-    count = s1/na.dtype(fmt).itemsize
-    ss = na.fromfile(f,fmt,count=count)
+    count = s1/np.dtype(fmt).itemsize
+    ss = np.fromfile(f,fmt,count=count)
     s2 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
     assert s1==s2
     return ss
@@ -406,14 +406,14 @@
 
 #All of these functions are to convert from hydro time var to 
 #proper time
-sqrt = na.sqrt
-sign = na.sign
+sqrt = np.sqrt
+sign = np.sign
 
 def find_root(f,a,b,tol=1e-6):
     c = (a+b)/2.0
-    last = -na.inf
+    last = -np.inf
     assert(sign(f(a)) != sign(f(b)))  
-    while na.abs(f(c)-last) > tol:
+    while np.abs(f(c)-last) > tol:
         last=f(c)
         if sign(last)==sign(f(b)):
             b=c
@@ -423,9 +423,9 @@
     return c
 
 def quad(fintegrand,xmin,xmax,n=1e4):
-    spacings = na.logspace(na.log10(xmin),na.log10(xmax),n)
+    spacings = np.logspace(np.log10(xmin),np.log10(xmax),n)
     integrand_arr = fintegrand(spacings)
-    val = na.trapz(integrand_arr,dx=na.diff(spacings))
+    val = np.trapz(integrand_arr,dx=np.diff(spacings))
     return val
 
 def a2b(at,Om0=0.27,Oml0=0.73,h=0.700):
@@ -450,14 +450,14 @@
     integrand = lambda x : 1./(x*sqrt(Oml0+Om0*x**-3.0))
     #current_time,err = si.quad(integrand,0.0,at,epsabs=1e-6,epsrel=1e-6)
     current_time = quad(integrand,1e-4,at)
-    #spacings = na.logspace(-5,na.log10(at),1e5)
+    #spacings = np.logspace(-5,np.log10(at),1e5)
     #integrand_arr = integrand(spacings)
-    #current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+    #current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
     current_time *= 9.779/h
     return current_time
 
 def b2t(tb,n = 1e2,logger=None,**kwargs):
-    tb = na.array(tb)
+    tb = np.array(tb)
     if type(tb) == type(1.1): 
         return a2t(b2a(tb))
     if tb.shape == (): return a2t(b2a(tb))
@@ -465,14 +465,14 @@
     if len(tb) < n: n= len(tb)
     age_min = a2t(b2a(tb.max(),**kwargs),**kwargs)
     age_max = a2t(b2a(tb.min(),**kwargs),**kwargs)
-    tbs  = -1.*na.logspace(na.log10(-tb.min()),
-                          na.log10(-tb.max()),n)
+    tbs  = -1.*np.logspace(np.log10(-tb.min()),
+                          np.log10(-tb.max()),n)
     ages = []
     for i,tbi in enumerate(tbs):
         ages += a2t(b2a(tbi)),
         if logger: logger(i)
-    ages = na.array(ages)
-    fb2t = na.interp(tb,tbs,ages)
+    ages = np.array(ages)
+    fb2t = np.interp(tb,tbs,ages)
     #fb2t = interp1d(tbs,ages)
     return fb2t
 




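The time-conversion helpers above integrate the Friedmann integrand with a trapezoid rule on a log-spaced grid and, in `b2t`, build a small table of ages that `np.interp` then samples. A self-contained sketch of the same integration, using the default cosmology constants shown in the diff:

    import numpy as np

    def quad(fintegrand, xmin, xmax, n=10000):
        # Trapezoid rule on a log-spaced grid, as in the ART reader above.
        spacings = np.logspace(np.log10(xmin), np.log10(xmax), n)
        integrand_arr = fintegrand(spacings)
        return np.trapz(integrand_arr, dx=np.diff(spacings))

    def a2t(at, Om0=0.27, Oml0=0.73, h=0.700):
        # Expansion factor -> age in Gyr for a flat matter+lambda cosmology;
        # 9.779/h Gyr is 1/H0 for H0 = 100h km/s/Mpc.
        integrand = lambda x: 1.0 / (x * np.sqrt(Oml0 + Om0 * x ** -3.0))
        return quad(integrand, 1e-4, at) * 9.779 / h

    print(a2t(1.0))   # ~13.9 Gyr for these parameters
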
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/athena/api.py
--- /dev/null
+++ b/yt/frontends/athena/api.py
@@ -0,0 +1,42 @@
+"""
+API for yt.frontends.athena
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: J.S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: MSU
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+from .data_structures import \
+      AthenaGrid, \
+      AthenaHierarchy, \
+      AthenaStaticOutput
+
+from .fields import \
+      AthenaFieldInfo, \
+      KnownAthenaFields, \
+      add_athena_field
+
+from .io import \
+      IOHandlerAthena


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/athena/data_structures.py
--- /dev/null
+++ b/yt/frontends/athena/data_structures.py
@@ -0,0 +1,356 @@
+"""
+Data structures for Athena.
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import h5py
+import numpy as np
+import weakref
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+           AMRGridPatch
+from yt.geometry.grid_geometry_handler import \
+    GridGeometryHandler
+from yt.data_objects.static_output import \
+           StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+
+from .fields import AthenaFieldInfo, KnownAthenaFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+
+def _get_convert(fname):
+    def _conv(data):
+        return data.convert(fname)
+    return _conv
+
+class AthenaGrid(AMRGridPatch):
+    _id_offset = 0
+    def __init__(self, id, hierarchy, level, start, dimensions):
+        df = hierarchy.storage_filename
+        if 'id0' not in hierarchy.parameter_file.filename:
+            gname = hierarchy.parameter_file.filename
+        else:
+            if id == 0:
+                gname = 'id0/%s.vtk' % df
+            else:
+                gname = 'id%i/%s-id%i%s.vtk' % (id, df[:-5], id, df[-5:] )
+        AMRGridPatch.__init__(self, id, filename = gname,
+                              hierarchy = hierarchy)
+        self.filename = gname
+        self.Parent = []
+        self.Children = []
+        self.Level = level
+        self.start_index = start.copy()
+        self.stop_index = self.start_index + dimensions
+        self.ActiveDimensions = dimensions.copy()
+
+    def _setup_dx(self):
+        # So first we figure out what the index is.  We don't assume
+        # that dx=dy=dz , at least here.  We probably do elsewhere.
+        id = self.id - self._id_offset
+        if len(self.Parent) > 0:
+            self.dds = self.Parent[0].dds / self.pf.refine_by
+        else:
+            LE, RE = self.hierarchy.grid_left_edge[id,:], \
+                     self.hierarchy.grid_right_edge[id,:]
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
+        if self.pf.dimensionality < 2: self.dds[1] = 1.0
+        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+def parse_line(line, grid):
+    # grid is a dictionary
+    splitup = line.strip().split()
+    if "vtk" in splitup:
+        grid['vtk_version'] = splitup[-1]
+    elif "Really" in splitup:
+        grid['time'] = splitup[-1]
+    elif any(x in ['PRIMITIVE','CONSERVED'] for x in splitup):
+        grid['time'] = float(splitup[4].rstrip(','))
+        grid['level'] = int(splitup[6].rstrip(','))
+        grid['domain'] = int(splitup[8].rstrip(','))
+    elif "DIMENSIONS" in splitup:
+        grid['dimensions'] = np.array(splitup[-3:]).astype('int')
+    elif "ORIGIN" in splitup:
+        grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
+    elif "SPACING" in splitup:
+        grid['dds'] = np.array(splitup[-3:]).astype('float64')
+    elif "CELL_DATA" in splitup:
+        grid["ncells"] = int(splitup[-1])
+    elif "SCALARS" in splitup:
+        field = splitup[1]
+        grid['read_field'] = field
+        grid['read_type'] = 'scalar'
+    elif "VECTORS" in splitup:
+        field = splitup[1]
+        grid['read_field'] = field
+        grid['read_type'] = 'vector'
+
+class AthenaHierarchy(GridGeometryHandler):
+
+    grid = AthenaGrid
+    _data_style='athena'
+    
+    def __init__(self, pf, data_style='athena'):
+        self.parameter_file = weakref.proxy(pf)
+        self.data_style = data_style
+        # for now, the hierarchy file is the parameter file!
+        self.storage_filename = self.parameter_file.storage_filename
+        self.hierarchy_filename = self.parameter_file.filename
+        #self.directory = os.path.dirname(self.hierarchy_filename)
+        self._fhandle = file(self.hierarchy_filename,'rb')
+        AMRHierarchy.__init__(self, pf, data_style)
+
+        self._fhandle.close()
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        field_map = {}
+        f = open(self.hierarchy_filename,'rb')
+        line = f.readline()
+        while line != '':
+            splitup = line.strip().split()
+            if "DIMENSIONS" in splitup:
+                grid_dims = np.array(splitup[-3:]).astype('int')
+                line = f.readline()
+            elif "CELL_DATA" in splitup:
+                grid_ncells = int(splitup[-1])
+                line = f.readline()
+                if np.prod(grid_dims) != grid_ncells:
+                    grid_dims -= 1
+                    grid_dims[grid_dims==0]=1
+                if np.prod(grid_dims) != grid_ncells:
+                    mylog.error('product of dimensions %i not equal to number of cells %i' %
+                          (np.prod(grid_dims), grid_ncells))
+                    raise TypeError
+                break
+            else:
+                line = f.readline()
+        read_table = False
+        read_table_offset = f.tell()
+        while line != '':
+            splitup = line.strip().split()
+            if 'SCALARS' in splitup:
+                field = splitup[1]
+                if not read_table:
+                    line = f.readline() # Read the lookup table line
+                    read_table = True
+                field_map[field] = ('scalar', f.tell() - read_table_offset)
+                read_table=False
+
+            elif 'VECTORS' in splitup:
+                field = splitup[1]
+                for ax in 'xyz':
+                    field_map["%s_%s" % (field, ax)] =\
+                            ('vector', f.tell() - read_table_offset)
+            line = f.readline()
+
+        f.close()
+
+        self.field_list = field_map.keys()
+        self._field_map = field_map
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        AMRHierarchy._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        self.num_grids = self.parameter_file.nvtk
+
+    def _parse_hierarchy(self):
+        f = open(self.hierarchy_filename,'rb')
+        grid = {}
+        grid['read_field'] = None
+        grid['read_type'] = None
+        table_read=False
+        line = f.readline()
+        while grid['read_field'] is None:
+            parse_line(line, grid)
+            if "SCALAR" in line.strip().split():
+                break
+            if "VECTOR" in line.strip().split():
+                break
+            if 'TABLE' in line.strip().split():
+                break
+            if len(line) == 0: break
+            line = f.readline()
+        f.close()
+
+        # It seems some datasets have a mismatch between ncells and 
+        # the actual grid dimensions.
+        if np.prod(grid['dimensions']) != grid['ncells']:
+            grid['dimensions'] -= 1
+            grid['dimensions'][grid['dimensions']==0]=1
+        if np.prod(grid['dimensions']) != grid['ncells']:
+            mylog.error('product of dimensions %i not equal to number of cells %i' % 
+                  (np.prod(grid['dimensions']), grid['ncells']))
+            raise TypeError
+
+        dxs=[]
+        self.grids = np.empty(self.num_grids, dtype='object')
+        levels = np.zeros(self.num_grids, dtype='int32')
+        single_grid_width = grid['dds']*grid['dimensions']
+        grids_per_dim = (self.parameter_file.domain_width/single_grid_width).astype('int32')
+        glis = np.empty((self.num_grids,3), dtype='int64')
+        for i in range(self.num_grids):
+            procz = i/(grids_per_dim[0]*grids_per_dim[1])
+            procy = (i - procz*(grids_per_dim[0]*grids_per_dim[1]))/grids_per_dim[0]
+            procx = i - procz*(grids_per_dim[0]*grids_per_dim[1]) - procy*grids_per_dim[0]
+            glis[i, 0] = procx*grid['dimensions'][0]
+            glis[i, 1] = procy*grid['dimensions'][1]
+            glis[i, 2] = procz*grid['dimensions'][2]
+        gdims = np.ones_like(glis)
+        gdims[:] = grid['dimensions']
+        for i in range(levels.shape[0]):
+            self.grids[i] = self.grid(i, self, levels[i],
+                                      glis[i],
+                                      gdims[i])
+
+            dx = (self.parameter_file.domain_right_edge-
+                  self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+            dx = dx/self.parameter_file.refine_by**(levels[i])
+            dxs.append(grid['dds'])
+        dx = np.array(dxs)
+        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
+        self.grid_dimensions = gdims.astype("int32")
+        self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
+        self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+
+        for g in self.grids:
+            g.Children = self._get_grid_children(g)
+            for g1 in g.Children:
+                g1.Parent.append(g)
+        self.max_level = self.grid_levels.max()
+
+#     def _setup_derived_fields(self):
+#         self.derived_field_list = []
+
+    def _get_grid_children(self, grid):
+        mask = np.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
+class AthenaStaticOutput(StaticOutput):
+    _hierarchy_class = AthenaHierarchy
+    _fieldinfo_fallback = AthenaFieldInfo
+    _fieldinfo_known = KnownAthenaFields
+    _data_style = "athena"
+
+    def __init__(self, filename, data_style='athena',
+                 storage_filename = None, parameters = {}):
+        self.specified_parameters = parameters
+        StaticOutput.__init__(self, filename, data_style)
+        self.filename = filename
+        self.storage_filename = filename[4:-4]
+
+    def _set_units(self):
+        """
+        Generates the conversion to various physical units based on the parameter file.
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+
+    def _setup_nounits_units(self):
+        self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
+    def _parse_parameter_file(self):
+        self._handle = open(self.parameter_filename, "rb")
+        # Read the start of a grid to get simulation parameters.
+        grid = {}
+        grid['read_field'] = None
+        line = self._handle.readline()
+        while grid['read_field'] is None:
+            parse_line(line, grid)
+            if "SCALAR" in line.strip().split():
+                break
+            if "VECTOR" in line.strip().split():
+                break
+            if 'TABLE' in line.strip().split():
+                break
+            if len(line) == 0: break
+            line = self._handle.readline()
+
+        self.domain_left_edge = grid['left_edge']
+        if 'domain_right_edge' in self.specified_parameters:
+            self.domain_right_edge = np.array(self.specified_parameters['domain_right_edge'])
+        else:
+            mylog.info("Please set 'domain_right_edge' in parameters dictionary argument " +
+                    "if it is not equal to -domain_left_edge.")
+            self.domain_right_edge = -self.domain_left_edge
+        self.domain_width = self.domain_right_edge-self.domain_left_edge
+        self.domain_dimensions = self.domain_width/grid['dds']
+        refine_by = None
+        if refine_by is None: refine_by = 2
+        self.refine_by = refine_by
+        self.dimensionality = 3
+        self.current_time = grid["time"]
+        self.unique_identifier = self._handle.__hash__()
+        self.cosmological_simulation = False
+        self.num_ghost_zones = 0
+        self.field_ordering = 'fortran'
+        self.boundary_conditions = [1]*6
+
+        self.nvtk = int(np.product(self.domain_dimensions/(grid['dimensions']-1)))
+
+        self.current_redshift = self.omega_lambda = self.omega_matter = \
+            self.hubble_constant = self.cosmological_simulation = 0.0
+        self.parameters['Time'] = self.current_time # Hardcode time conversion for now.
+        self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+        self._handle.close()
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            if 'vtk' in args[0]:
+                return True
+        except:
+            pass
+        return False
+
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
+


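In `_parse_hierarchy`, each VTK block's left index is recovered from its linear rank `i`, with x varying fastest. That explicit arithmetic is equivalent to C-order unraveling over the reversed (z, y, x) shape, which this sketch verifies (the block counts are made up):

    import numpy as np

    grids_per_dim = np.array([4, 3, 2])   # blocks along x, y, z

    for i in range(int(grids_per_dim.prod())):
        # Explicit decomposition from _parse_hierarchy (// is the Python 3
        # spelling of the integer division used above):
        procz = i // (grids_per_dim[0] * grids_per_dim[1])
        procy = (i - procz * grids_per_dim[0] * grids_per_dim[1]) // grids_per_dim[0]
        procx = i - procz * grids_per_dim[0] * grids_per_dim[1] - procy * grids_per_dim[0]
        # Equivalent: C-order unraveling over the reversed (z, y, x) shape.
        pz, py, px = np.unravel_index(i, grids_per_dim[::-1])
        assert (procx, procy, procz) == (px, py, pz)
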
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/athena/definitions.py
--- /dev/null
+++ b/yt/frontends/athena/definitions.py
@@ -0,0 +1,25 @@
+"""
+Various definitions for various other modules and routines
+
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 J.S. Oishi.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/athena/fields.py
--- /dev/null
+++ b/yt/frontends/athena/fields.py
@@ -0,0 +1,88 @@
+"""
+Athena-specific fields
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType, \
+    NullFunc, \
+    TranslationFunc
+import yt.data_objects.universal_fields
+
+log_translation_dict = {}
+
+translation_dict = {"Density": "density",
+                    "Pressure": "pressure",
+                    "x-velocity": "velocity_x",
+                    "y-velocity": "velocity_y",
+                    "z-velocity": "velocity_z",
+                    "mag_field_x": "cell_centered_B_x ",
+                    "mag_field_y": "cell_centered_B_y ",
+                    "mag_field_z": "cell_centered_B_z "}
+
+AthenaFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = AthenaFieldInfo.add_field
+
+KnownAthenaFields = FieldInfoContainer()
+add_athena_field = KnownAthenaFields.add_field
+
+add_athena_field("density", function=NullFunc, take_log=False,
+          units=r"",
+          projected_units =r"")
+
+add_athena_field("pressure", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("velocity_x", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("velocity_y", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("velocity_z", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("cell_centered_B_x", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("cell_centered_B_y", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("cell_centered_B_z", function=NullFunc, take_log=False,
+          units=r"")
+
+for f,v in log_translation_dict.items():
+    add_field(f, TranslationFunc(v), take_log=True)
+
+for f,v in translation_dict.items():
+    add_field(f, TranslationFunc(v), take_log=False)
+


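The translation dictionaries map yt's universal field names onto the names Athena writes to disk; `TranslationFunc` simply closes over the on-disk name and reads it back from the data object. A minimal sketch of that aliasing pattern, with a stand-in registry rather than yt's actual `FieldInfoContainer`:

    def translation_func(field_name):
        # Return a field function that fetches `field_name` from the data
        # object, aliasing one field name to another.
        def _translated(field, data):
            return data[field_name]
        return _translated

    # Hypothetical registry standing in for FieldInfoContainer.add_field:
    fields = {}
    def add_field(name, function, take_log=False):
        fields[name] = function

    for yt_name, disk_name in {"Density": "density", "Pressure": "pressure"}.items():
        add_field(yt_name, translation_func(disk_name))

    data = {"density": 1.0, "pressure": 2.5}
    assert fields["Density"](None, data) == 1.0
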
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/athena/io.py
--- /dev/null
+++ b/yt/frontends/athena/io.py
@@ -0,0 +1,107 @@
+"""
+The data-file handling functions
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+from yt.utilities.io_handler import \
+           BaseIOHandler
+import numpy as np
+
+class IOHandlerAthena(BaseIOHandler):
+    _data_style = "athena"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+    _read_table_offset = None
+
+    def _field_dict(self,fhandle):
+        keys = fhandle['field_types'].keys()
+        val = fhandle['field_types'].keys()
+        return dict(zip(keys,val))
+
+    def _read_field_names(self,grid):
+        pass
+
+    def _read_data_set(self,grid,field):
+        f = file(grid.filename, 'rb')
+        dtype, offset = grid.hierarchy._field_map[field]
+        grid_ncells = np.prod(grid.ActiveDimensions)
+        grid_dims = grid.ActiveDimensions
+        read_table_offset = get_read_table_offset(f)
+        f.seek(read_table_offset+offset)
+        if dtype == 'scalar':
+            data = np.fromfile(f, dtype='>f4',
+                    count=grid_ncells).reshape(grid_dims,order='F').copy()
+        if dtype == 'vector':
+            data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
+            if '_x' in field:
+                data = data[0::3].reshape(grid_dims,order='F').copy()
+            elif '_y' in field:
+                data = data[1::3].reshape(grid_dims,order='F').copy()
+            elif '_z' in field:
+                data = data[2::3].reshape(grid_dims,order='F').copy()
+        f.close()
+        if grid.pf.field_ordering == 1:
+            return data.T
+        else:
+            return data
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        if grid.pf.field_ordering == 1:
+            sl.reverse()
+
+        f = file(grid.filename, 'rb')
+        dtype, offset = grid.hierarchy._field_map[field]
+        grid_ncells = np.prod(grid.ActiveDimensions)
+
+        read_table_offset = get_read_table_offset(f)
+        f.seek(read_table_offset+offset)
+        if dtype == 'scalar':
+            data = np.fromfile(f, dtype='>f4', 
+                    count=grid_ncells).reshape(grid.ActiveDimensions,order='F')[sl].copy()
+        if dtype == 'vector':
+            data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
+            if '_x' in field:
+                data = data[0::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+            elif '_y' in field:
+                data = data[1::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+            elif '_z' in field:
+                data = data[2::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+        f.close()
+        return data
+
+def get_read_table_offset(f):
+    line = f.readline()
+    while True:
+        splitup = line.strip().split()
+        if 'CELL_DATA' in splitup:
+            f.readline()
+            read_table_offset = f.tell()
+            break
+        line = f.readline()
+    return read_table_offset
+
+




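VTK stores CELL_DATA vectors interleaved as x0 y0 z0 x1 y1 z1 ..., which is why `_read_data_set` pulls out one component with a stride-3 slice before the Fortran-order reshape. A small demonstration of the slicing:

    import numpy as np

    # Interleaved vector data for four cells: x0 y0 z0 x1 y1 z1 ...
    raw = np.array([0, 10, 20, 1, 11, 21, 2, 12, 22, 3, 13, 23], dtype='>f4')

    vx = raw[0::3]   # every third value starting at 0 -> x components
    vy = raw[1::3]
    vz = raw[2::3]

    assert (vx == [0, 1, 2, 3]).all() and (vz == [20, 21, 22, 23]).all()
    # Each component is then reshaped with order='F' to match the on-disk
    # x-fastest cell ordering, exactly as in _read_data_set above.
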
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/athena/setup.py
--- /dev/null
+++ b/yt/frontends/athena/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('athena', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


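The `configuration` function follows the numpy.distutils convention: the parent package's setup.py pulls each frontend in with `add_subpackage`, which recursively invokes this file. A sketch of the standard parent-side pattern (assumed here, not quoted from yt's tree):

    #!/usr/bin/env python
    def configuration(parent_package='', top_path=None):
        from numpy.distutils.misc_util import Configuration
        config = Configuration('frontends', parent_package, top_path)
        # numpy.distutils recursively runs athena/setup.py:configuration()
        config.add_subpackage("athena")
        return config
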
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -31,7 +31,7 @@
 from string import strip, rstrip
 from stat import ST_CTIME
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
@@ -101,18 +101,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
-
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
@@ -174,12 +167,12 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
         self.refinementFactor_unnecessary = self._global_header_lines[counter].split()
-        #na.array(map(int, self._global_header_lines[counter].split()))
+        #np.array(map(int, self._global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
         #domain_re.search(self._global_header_lines[counter]).groups()
@@ -187,9 +180,9 @@
         self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
         counter += 1
 
-        self.dx = na.zeros((self.n_levels, 3))
+        self.dx = np.zeros((self.n_levels, 3))
         for i, line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float, line.split()))
+            self.dx[i] = np.array(map(float, line.split()))
         counter += self.n_levels
         self.geometry = int(self._global_header_lines[counter])
         if self.geometry != 0:
@@ -273,8 +266,8 @@
                 counter += 1
                 zlo, zhi = map(float, self._global_header_lines[counter].split())
                 counter += 1
-                lo = na.array([xlo, ylo, zlo])
-                hi = na.array([xhi, yhi, zhi])
+                lo = np.array([xlo, ylo, zlo])
+                hi = np.array([xhi, yhi, zhi])
                 dims, start, stop = self._calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
                                                        level, gfn, gfo, dims,
@@ -296,7 +289,7 @@
     def read_particle_header(self):
         # We need to get particle offsets and particle counts
         if not self.parameter_file.use_particles:
-            self.pgrid_info = na.zeros((self.num_grids, 3), dtype='int64')
+            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
             return
 
         self.field_list += castro_particle_field_names[:]
@@ -311,7 +304,7 @@
 
         # Skip over how many grids on each level; this is degenerate
         for i in range(maxlevel+1): dummy = header.readline()
-        grid_info = na.fromiter((int(i)
+        grid_info = np.fromiter((int(i)
                                  for line in header.readlines()
                                  for i in line.split()),
                                 dtype='int64',
@@ -347,15 +340,15 @@
         self._dtype = dtype
 
     def _calculate_grid_dimensions(self, start_stop):
-        start = na.array(map(int, start_stop[0].split(',')))
-        stop = na.array(map(int, start_stop[1].split(',')))
+        start = np.array(map(int, start_stop[0].split(',')))
+        stop = np.array(map(int, start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension, start, stop
 
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
 
-        self.grids = na.concatenate([level.grids for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
         basedir = self.parameter_file.fullplotdir
 
         for g, pg in itertools.izip(self.grids, self.pgrid_info):
@@ -367,9 +360,9 @@
         self.grid_particle_count[:,0] = self.pgrid_info[:,1]
         del self.pgrid_info
 
-        gls = na.concatenate([level.ngrids * [level.level] for level in self.levels])
+        gls = np.concatenate([level.ngrids * [level.level] for level in self.levels])
         self.grid_levels[:] = gls.reshape((self.num_grids,1))
-        grid_dcs = na.concatenate([level.ngrids * [self.dx[level.level]]
+        grid_dcs = np.concatenate([level.ngrids * [self.dx[level.level]]
                                   for level in self.levels], axis=0)
 
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
@@ -384,9 +377,9 @@
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
 
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -405,7 +398,7 @@
             grid._setup_dx()
 
     def _setup_grid_tree(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         for i, grid in enumerate(self.grids):
             get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
                                 self.grid_left_edge, self.grid_right_edge,
@@ -424,10 +417,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -439,7 +432,7 @@
             except:
                 continue
 
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
 
         for field in self.field_list:
@@ -473,11 +466,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -620,9 +613,9 @@
                     else:
                         self.parameters[paramName] = t
             elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = na.array([float(i) for i in vals.split()])
+                self.domain_right_edge = np.array([float(i) for i in vals.split()])
             elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = na.array([float(i) for i in vals.split()])
+                self.domain_left_edge = np.array([float(i) for i in vals.split()])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals)
 


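`_calculate_grid_dimensions` converts the BoxLib header's inclusive start/stop index strings into cell counts, hence the `+ 1`. A tiny worked example with a hypothetical index pair:

    import numpy as np

    start_stop = ("0,0,0", "63,63,31")    # hypothetical BoxLib index pair
    start = np.array(list(map(int, start_stop[0].split(','))))
    stop = np.array(list(map(int, start_stop[1].split(','))))
    dimension = stop - start + 1          # inclusive indices -> cell counts
    assert (dimension == [64, 64, 32]).all()
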
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/castro/io.py
--- a/yt/frontends/castro/io.py
+++ b/yt/frontends/castro/io.py
@@ -25,7 +25,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 from yt.utilities.lib import \
@@ -46,7 +46,7 @@
         offset = grid._particle_offset
         filen = os.path.expanduser(grid.particle_filename)
         off = grid._particle_offset
-        tr = na.zeros(grid.NumberOfParticles, dtype='float64')
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
         read_castro_particles(filen, off,
             castro_particle_field_names.index(field),
             len(castro_particle_field_names),
@@ -85,8 +85,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int, start.split(',')))
-            stop = na.array(map(int, stop.split(',')))
+            start = np.array(map(int, start.split(',')))
+            stop = np.array(map(int, stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -126,7 +126,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile, count=nElements, dtype=dtype)
+        field = np.fromfile(inFile, count=nElements, dtype=dtype)
         field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
 
         # we can/should also check against the max and min in the header file
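
The FAB reader touched above combines three steps: parse the inclusive lo/hi
index strings, build a dtype string from the header's bytes-per-real, and
bulk-read with np.fromfile.  A minimal sketch of that pattern, assuming
little-endian reals and Fortran ordering on disk (names here are illustrative,
not the Castro reader's actual API):

    import numpy as np

    def read_fab_block(f, start, stop, bytes_per_real=8):
        lo = np.array([int(s) for s in start.split(',')])
        hi = np.array([int(s) for s in stop.split(',')])
        size = hi - lo + 1                 # indices are inclusive
        n = int(np.prod(size))
        dtype = '<f%i' % bytes_per_real    # assume little-endian floats
        data = np.fromfile(f, count=n, dtype=dtype)
        # column-major on disk -> transpose into C order
        return data.reshape(size[::-1]).swapaxes(0, 2)

Note that np.array(map(int, ...)) as written only works on Python 2, where map
returns a list; the list comprehension form sidesteps that.
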


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -28,7 +28,7 @@
 import re
 import os
 import weakref
-import numpy as na
+import numpy as np
 
 from collections import \
      defaultdict
@@ -81,25 +81,16 @@
         if self.Parent == []:
             iLE = self.LeftEdge - self.pf.domain_left_edge
             start_index = iLE / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-            na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+            np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        self.dds = self.hierarchy.dds_list[self.Level]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class ChomboHierarchy(GridGeometryHandler):
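
The new _setup_dx replaces the per-grid parent walk with a single lookup into
a per-level table built once during hierarchy parsing (the dds_list populated
below).  A minimal sketch of the idea, assuming cubic cells so that
dx = dy = dz (hypothetical names, not the frontend's full classes):

    import numpy as np

    # built once while reading the levels, coarsest first
    dds_list = [dx * np.ones(3) for dx in (1.0, 0.5, 0.25)]

    class Grid(object):
        def __init__(self, level):
            self.Level = level
        def _setup_dx(self):
            self.dds = dds_list[self.Level]   # O(1), no parent traversal
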
@@ -137,18 +128,18 @@
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
                # copied from object_finding_mixin.py
-                mask=na.ones(self.num_grids)
+                mask=np.ones(self.num_grids)
                 for i in xrange(len(coord)):
-                    na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                ind = na.where(mask == 1)
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
                 selected_grids = self.grids[ind]
                 # in orion, particles always live on the finest level.
                 # so, we want to assign the particle to the finest of
                 # the grids we just found
                 if len(selected_grids) != 0:
                     grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                    ind = na.where(self.grids == grid)[0][0]
+                    ind = np.where(self.grids == grid)[0][0]
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
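
The choose/greater calls implement an axis-by-axis containment test: a grid
survives only if left_edge <= coord < right_edge on every axis, and the
finest surviving grid wins.  An equivalent, more direct formulation with
boolean masks (a sketch with made-up edges, not the frontend's code):

    import numpy as np

    grid_left  = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
    grid_right = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
    coord = [0.75, 0.75, 0.75]

    mask = np.ones(len(grid_left), dtype=bool)
    for i in range(3):
        mask &= (grid_left[:, i] <= coord[i]) & (coord[i] < grid_right[:, i])
    print(np.where(mask)[0])    # -> [0 1]: both grids contain the point

The sort-then-take-last idiom for picking the finest grid could equally be
written max(selected_grids, key=lambda g: g.Level).
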
 
@@ -176,14 +167,16 @@
         # 'Chombo_global'
         levels = f.keys()[1:]
         grids = []
+        self.dds_list = []
         i = 0
         for lev in levels:
             level_number = int(re.match('level_(\d+)',lev).groups()[0])
             boxes = f[lev]['boxes'].value
             dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
             for level_id, box in enumerate(boxes):
-                si = na.array([box['lo_%s' % ax] for ax in 'ijk'])
-                ei = na.array([box['hi_%s' % ax] for ax in 'ijk'])
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])
                 pg = self.grid(len(grids),self,level=level_number,
                                start = si, stop = ei)
                 grids.append(pg)
@@ -193,7 +186,7 @@
                 self.grid_particle_count[i] = 0
                 self.grid_dimensions[i] = ei - si + 1
                 i += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
 
     def _populate_grid_objects(self):
@@ -211,7 +204,7 @@
         self.derived_field_list = []
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
@@ -315,21 +308,21 @@
     def __calc_left_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']
-        LE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
         fileh.close()
         return LE
 
     def __calc_right_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']
-        RE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         fileh.close()
         return RE
                   
     def __calc_domain_dimensions(self):
         fileh = h5py.File(self.parameter_filename,'r')
-        L_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
-        R_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         return R_index - L_index
  
     @classmethod
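
All three helpers decode the level-0 prob_domain attribute, which stores
inclusive lo and hi cell indices; the hi side therefore needs a +1 before
scaling by dx0.  A worked example with made-up numbers:

    import numpy as np

    dx0 = 0.01
    prob_domain = np.array([0, 0, 0, 99, 127, 63])   # (lo_i..k, hi_i..k)
    LE   = dx0 * prob_domain[0:3]                    # [0.   0.   0.  ]
    RE   = dx0 * (prob_domain[3:] + 1)               # [1.   1.28 0.64]
    dims = (prob_domain[3:] + 1) - prob_domain[0:3]  # [100  128  64  ]

Each helper also reopens the HDF5 file; a combined reader could pull all
three quantities from one h5py.File handle.
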


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -33,7 +33,7 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
-import numpy as na
+import numpy as np
 
 KnownChomboFields = FieldInfoContainer()
 add_chombo_field = KnownChomboFields.add_field
@@ -98,6 +98,21 @@
 add_field("Density",function=_Density, take_log=True,
           units=r'\rm{g}/\rm{cm^3}')
 
+def _Bx(field,data):
+    return data["X-magnfield"]
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(field,data):
+    return data["Y-magnfield"]
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(field,data):
+    return data["Z-magnfield"]
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
 def _MagneticEnergy(field,data):
     return (data["X-magnfield"]**2 +
             data["Y-magnfield"]**2 +
@@ -131,7 +146,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         else:
             return io._read_particles(data, p_field).astype(dtype)
         


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -25,7 +25,7 @@
 """
 import h5py
 import re
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
            BaseIOHandler
@@ -108,4 +108,4 @@
                     if ( (grid.LeftEdge < coord).all() and
                          (coord <= grid.RightEdge).all() ):
                         particles.append(read(line, field))
-        return na.array(particles)
+        return np.array(particles)


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -25,15 +25,11 @@
 
 import h5py
 import weakref
-import numpy as na
+import numpy as np
 import os
 import stat
 import string
 import re
-try:
-    from pyhdf_np import SD
-except ImportError:
-    pass
 
 from itertools import izip
 
@@ -90,7 +86,7 @@
         my_ind = self.id - self._id_offset
         le = self.LeftEdge
         self.dds = self.Parent.dds/rf
-        ParentLeftIndex = na.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
+        ParentLeftIndex = np.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
         self.start_index = rf*(ParentLeftIndex + self.Parent.get_global_startindex()).astype('int64')
         self.LeftEdge = self.Parent.LeftEdge + self.Parent.dds * ParentLeftIndex
         self.RightEdge = self.LeftEdge + self.ActiveDimensions*self.dds
@@ -179,7 +175,7 @@
                 if self.pf.field_info[field].particle_type: continue
                 temp = self.hierarchy.io._read_raw_data_set(self, field)
                 temp = temp.swapaxes(0, 2)
-                cube.field_data[field] = na.multiply(temp, conv_factor, temp)[sl]
+                cube.field_data[field] = np.multiply(temp, conv_factor, temp)[sl]
         return cube
 
 class EnzoHierarchy(GridGeometryHandler):
@@ -291,7 +287,7 @@
         f = open(self.hierarchy_filename, "rb")
         self.grids = [self.grid(1, self)]
         self.grids[0].Level = 0
-        si, ei, LE, RE, fn, np = [], [], [], [], [], []
+        si, ei, LE, RE, fn, npart = [], [], [], [], [], []
         all = [si, ei, LE, RE, fn]
         pbar = get_pbar("Parsing Hierarchy", self.num_grids)
         if self.parameter_file.parameters["VersionNumber"] > 2.0:
@@ -310,19 +306,19 @@
             nb = int(_next_token_line("NumberOfBaryonFields", f)[0])
             fn.append(["-1"])
             if nb > 0: fn[-1] = _next_token_line("BaryonFileName", f)
-            np.append(int(_next_token_line("NumberOfParticles", f)[0]))
+            npart.append(int(_next_token_line("NumberOfParticles", f)[0]))
             if active_particles:
                 ta = int(_next_token_line( "NumberOfActiveParticles", f)[0])
                 nap.append(ta)
-            if nb == 0 and np[-1] > 0: fn[-1] = _next_token_line("ParticleFileName", f)
+            if nb == 0 and npart[-1] > 0: fn[-1] = _next_token_line("ParticleFileName", f)
             for line in f:
                 if len(line) < 2: break
                 if line.startswith("Pointer:"):
                     vv = patt.findall(line)[0]
                     self.__pointer_handler(vv)
         pbar.finish()
-        self._fill_arrays(ei, si, LE, RE, np, nap)
-        temp_grids = na.empty(self.num_grids, dtype='object')
+        self._fill_arrays(ei, si, LE, RE, npart, nap)
+        temp_grids = np.empty(self.num_grids, dtype='object')
         temp_grids[:] = self.grids
         self.grids = temp_grids
         self.filenames = fn
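
The rename from np to npart is required, not cosmetic: once numpy is imported
as np, a local list of that name shadows the module inside the method.  A
two-line demonstration:

    import numpy as np
    np = []            # local binding now hides the module
    np.append(3)       # still works as a list...
    # np.empty(5)      # ...but this would raise AttributeError
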
@@ -331,15 +327,15 @@
 
     def _initialize_grid_arrays(self):
         super(EnzoHierarchy, self)._initialize_grid_arrays()
-        self.grid_active_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_active_particle_count = np.zeros((self.num_grids,1), 'int32')
 
-    def _fill_arrays(self, ei, si, LE, RE, np, nap):
+    def _fill_arrays(self, ei, si, LE, RE, npart, nap):
         self.grid_dimensions.flat[:] = ei
-        self.grid_dimensions -= na.array(si, self.float_type)
+        self.grid_dimensions -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge.flat[:] = LE
         self.grid_right_edge.flat[:] = RE
-        self.grid_particle_count.flat[:] = np
+        self.grid_particle_count.flat[:] = npart
         if nap is not None:
             self.grid_active_particle_count.flat[:] = nap
 
@@ -394,7 +390,7 @@
             if Pid > -1:
                 grids[Pid-1]._children_ids.append(grid.id)
             self.filenames.append(pmap[P])
-        self.grids = na.array(grids, dtype='object')
+        self.grids = np.array(grids, dtype='object')
         f.close()
         mylog.info("Finished with binary hierarchy reading")
         return True
@@ -423,9 +419,9 @@
             procs.append(int(self.filenames[i][0][-4:]))
             levels.append(g.Level)
 
-        parents = na.array(parents, dtype='int64')
-        procs = na.array(procs, dtype='int64')
-        levels = na.array(levels, dtype='int64')
+        parents = np.array(parents, dtype='int64')
+        procs = np.array(procs, dtype='int64')
+        levels = np.array(levels, dtype='int64')
         f.create_dataset("/ParentIDs", data=parents)
         f.create_dataset("/Processor", data=procs)
         f.create_dataset("/Level", data=levels)
@@ -440,7 +436,7 @@
         mylog.info("Rebuilding grids on level %s", level)
         cmask = (self.grid_levels.flat == (level + 1))
         cmsum = cmask.sum()
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         for grid in self.select_grids(level):
             mask[:] = 0
             LE = self.grid_left_edge[grid.id - grid._id_offset]
@@ -456,13 +452,15 @@
         mylog.info("Finished rebuilding")
 
     def _populate_grid_objects(self):
+        reconstruct = ytcfg.getboolean("yt","reconstruct_hierarchy")
         for g,f in izip(self.grids, self.filenames):
             g._prepare_grid()
             g.NumberOfActiveParticles = \
                 self.grid_active_particle_count[g.id - g._id_offset,0]
             g._setup_dx()
             g.set_filename(f[0])
-            #if g.Parent is not None: g._guess_properties_from_parent()
+            if reconstruct:
+                if g.Parent is not None: g._guess_properties_from_parent()
         del self.filenames # No longer needed.
         self.max_level = self.grid_levels.max()
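
The property guessing that used to be commented out is now gated on the yt
configuration, so it can be enabled per run instead of by editing source.
Assuming the standard ytcfg interface, something like:

    from yt.config import ytcfg
    ytcfg["yt", "reconstruct_hierarchy"] = "True"   # set before loading data
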
 
@@ -517,20 +515,20 @@
 
     def _generate_random_grids(self):
         if self.num_grids > 40:
-            starter = na.random.randint(0, 20)
-            random_sample = na.mgrid[starter:len(self.grids)-1:20j].astype("int32")
+            starter = np.random.randint(0, 20)
+            random_sample = np.mgrid[starter:len(self.grids)-1:20j].astype("int32")
             # We also add in a bit to make sure that some of the grids have
             # particles
             gwp = self.grid_particle_count > 0
-            if na.any(gwp) and not na.any(gwp[(random_sample,)]):
+            if np.any(gwp) and not np.any(gwp[(random_sample,)]):
                 # We just add one grid.  This is not terribly efficient.
-                first_grid = na.where(gwp)[0][0]
+                first_grid = np.where(gwp)[0][0]
                 random_sample.resize((21,))
                 random_sample[-1] = first_grid
                 mylog.debug("Added additional grid %s", first_grid)
             mylog.debug("Checking grids: %s", random_sample.tolist())
         else:
-            random_sample = na.mgrid[0:max(len(self.grids),1)].astype("int32")
+            random_sample = np.mgrid[0:max(len(self.grids),1)].astype("int32")
         return self.grids[(random_sample,)]
 
     def find_particles_by_type(self, ptype, max_num=None, additional_fields=None):
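
The 20j in the mgrid slice is an imaginary step, which numpy interprets as a
sample count rather than a stride, endpoints inclusive:

    import numpy as np
    idx = np.mgrid[3:99:20j].astype("int32")
    # 20 evenly spaced values; idx[0] == 3, idx[-1] == 99
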
@@ -558,7 +556,7 @@
         pstore = []
         for level in range(self.max_level, -1, -1):
             for grid in self.select_grids(level):
-                index = na.where(grid['particle_type'] == ptype)[0]
+                index = np.where(grid['particle_type'] == ptype)[0]
                 total += len(index)
                 pstore.append(index)
                 if total >= max_num: break
@@ -567,7 +565,7 @@
         if total > 0:
             result = {}
             for p in pfields:
-                result[p] = na.zeros(total, 'float64')
+                result[p] = np.zeros(total, 'float64')
             # Now we retrieve data for each field
             ig = count = 0
             for level in range(self.max_level, -1, -1):
@@ -630,7 +628,7 @@
                 grids[pid-1]._children_ids.append(grids[-1].id)
         self.max_level = self.grid_levels.max()
         mylog.debug("Preparing grids")
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for i, grid in enumerate(grids):
             if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
             grid.filename = None
@@ -641,7 +639,7 @@
 
     def _initialize_grid_arrays(self):
         EnzoHierarchy._initialize_grid_arrays(self)
-        self.grid_procs = na.zeros((self.num_grids,1),'int32')
+        self.grid_procs = np.zeros((self.num_grids,1),'int32')
 
     def _copy_hierarchy_structure(self):
         # Dimensions are important!
@@ -678,18 +676,18 @@
         my_rank = self.comm.rank
         my_grids = self.grids[self.grid_procs.ravel() == my_rank]
         if len(my_grids) > 40:
-            starter = na.random.randint(0, 20)
-            random_sample = na.mgrid[starter:len(my_grids)-1:20j].astype("int32")
+            starter = np.random.randint(0, 20)
+            random_sample = np.mgrid[starter:len(my_grids)-1:20j].astype("int32")
             mylog.debug("Checking grids: %s", random_sample.tolist())
         else:
-            random_sample = na.mgrid[0:max(len(my_grids)-1,1)].astype("int32")
+            random_sample = np.mgrid[0:max(len(my_grids)-1,1)].astype("int32")
         return my_grids[(random_sample,)]
 
 class EnzoHierarchy1D(EnzoHierarchy):
 
     def _fill_arrays(self, ei, si, LE, RE, np):
         self.grid_dimensions[:,:1] = ei
-        self.grid_dimensions[:,:1] -= na.array(si, self.float_type)
+        self.grid_dimensions[:,:1] -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge[:,:1] = LE
         self.grid_right_edge[:,:1] = RE
@@ -702,7 +700,7 @@
 
     def _fill_arrays(self, ei, si, LE, RE, np):
         self.grid_dimensions[:,:2] = ei
-        self.grid_dimensions[:,:2] -= na.array(si, self.float_type)
+        self.grid_dimensions[:,:2] -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge[:,:2] = LE
         self.grid_right_edge[:,:2] = RE
@@ -743,39 +741,22 @@
         StaticOutput.__init__(self, filename, data_style, file_style=file_style)
         if "InitialTime" not in self.parameters:
             self.current_time = 0.0
-        rp = os.path.join(self.directory, "rates.out")
-        if os.path.exists(rp):
-            try:
-                self.rates = EnzoTable(rp, rates_out_key)
-            except:
-                pass
-        cp = os.path.join(self.directory, "cool_rates.out")
-        if os.path.exists(cp):
-            try:
-                self.cool = EnzoTable(cp, cool_out_key)
-            except:
-                pass
-
-        # Now fixes for different types of Hierarchies
-        # This includes changing the fieldinfo class!
-        if self["TopGridRank"] == 1: self._setup_1d()
-        elif self["TopGridRank"] == 2: self._setup_2d()
 
     def _setup_1d(self):
         self._hierarchy_class = EnzoHierarchy1D
         self._fieldinfo_fallback = Enzo1DFieldInfo
         self.domain_left_edge = \
-            na.concatenate([[self.domain_left_edge], [0.0, 0.0]])
+            np.concatenate([[self.domain_left_edge], [0.0, 0.0]])
         self.domain_right_edge = \
-            na.concatenate([[self.domain_right_edge], [1.0, 1.0]])
+            np.concatenate([[self.domain_right_edge], [1.0, 1.0]])
 
     def _setup_2d(self):
         self._hierarchy_class = EnzoHierarchy2D
         self._fieldinfo_fallback = Enzo2DFieldInfo
         self.domain_left_edge = \
-            na.concatenate([self["DomainLeftEdge"], [0.0]])
+            np.concatenate([self["DomainLeftEdge"], [0.0]])
         self.domain_right_edge = \
-            na.concatenate([self["DomainRightEdge"], [1.0]])
+            np.concatenate([self["DomainRightEdge"], [1.0]])
 
     def get_parameter(self,parameter,type=None):
         """
@@ -868,7 +849,7 @@
             elif len(vals) == 1:
                 vals = pcast(vals[0])
             else:
-                vals = na.array([pcast(i) for i in vals if i != "-99999"])
+                vals = np.array([pcast(i) for i in vals if i != "-99999"])
             if param.startswith("Append") and param not in self.parameters:
                 self.parameters[param] = []
             if param.startswith("Append"):
@@ -888,17 +869,17 @@
             if len(self.domain_dimensions) < 3:
                 tmp = self.domain_dimensions.tolist()
                 tmp.append(1)
-                self.domain_dimensions = na.array(tmp)
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                self.domain_dimensions = np.array(tmp)
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64").copy()
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64").copy()
         else:
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64")
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64")
-            self.domain_dimensions = na.array([self.parameters["TopGridDimensions"],1,1])
+            self.domain_dimensions = np.array([self.parameters["TopGridDimensions"],1,1])
 
         self.current_time = self.parameters["InitialTime"]
         # To be enabled when we can break old pickles:
@@ -919,6 +900,11 @@
         for ptype in self.parameters.get("AppendActiveParticleType", []):
             self.particle_types.append(ptype)
 
+        if self.dimensionality == 1:
+            self._setup_1d()
+        elif self.dimensionality == 2:
+            self._setup_2d()
+
     def _set_units(self):
         """
         Generates the conversion to various physical _units based on the parameter file
@@ -986,7 +972,7 @@
         with fortran code.
         """
         k = {}
-        k["utim"] = 2.52e17/na.sqrt(self.omega_matter)\
+        k["utim"] = 2.52e17/np.sqrt(self.omega_matter)\
                        / self.hubble_constant \
                        / (1+self.parameters["CosmologyInitialRedshift"])**1.5
         k["urho"] = 1.88e-29 * self.omega_matter \
@@ -998,8 +984,8 @@
                (1.0 + self.current_redshift)
         k["uaye"] = 1.0/(1.0 + self.parameters["CosmologyInitialRedshift"])
         k["uvel"] = 1.225e7*self.parameters["CosmologyComovingBoxSize"] \
-                      *na.sqrt(self.omega_matter) \
-                      *na.sqrt(1+ self.parameters["CosmologyInitialRedshift"])
+                      *np.sqrt(self.omega_matter) \
+                      *np.sqrt(1+ self.parameters["CosmologyInitialRedshift"])
         k["utem"] = 1.88e6 * (self.parameters["CosmologyComovingBoxSize"]**2) \
                       * self.omega_matter \
                       * (1.0 + self.parameters["CosmologyInitialRedshift"])
@@ -1039,7 +1025,7 @@
         self.conversion_factors.update(enzo.conversion_factors)
         for i in self.parameters:
             if isinstance(self.parameters[i], types.TupleType):
-                self.parameters[i] = na.array(self.parameters[i])
+                self.parameters[i] = np.array(self.parameters[i])
             if i.endswith("Units") and not i.startswith("Temperature"):
                 dataType = i[:-5]
                 self.conversion_factors[dataType] = self.parameters[i]
@@ -1047,7 +1033,7 @@
         self.domain_right_edge = self.parameters["DomainRightEdge"].copy()
         for i in self.conversion_factors:
             if isinstance(self.conversion_factors[i], types.TupleType):
-                self.conversion_factors[i] = na.array(self.conversion_factors[i])
+                self.conversion_factors[i] = np.array(self.conversion_factors[i])
         for p, v in self._parameter_override.items():
             self.parameters[p] = v
         for p, v in self._conversion_override.items():


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
@@ -42,7 +42,7 @@
 
 import yt.utilities.lib as amr_utils
 
-EnzoFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+EnzoFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo, "EFI")
 add_field = EnzoFieldInfo.add_field
 
 KnownEnzoFields = FieldInfoContainer()
@@ -171,23 +171,29 @@
 # We set up fields for both TotalEnergy and Total_Energy in the known fields
 # lists.  Note that this does not mean these will be the used definitions.
 add_enzo_field("TotalEnergy", function=NullFunc,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 add_enzo_field("Total_Energy", function=NullFunc,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _Total_Energy(field, data):
     return data["TotalEnergy"] / _convertEnergy(data)
 add_field("Total_Energy", function=_Total_Energy,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
+          units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
+
+def _TotalEnergy(field, data):
+    return data["Total_Energy"] / _convertEnergy(data)
+add_field("TotalEnergy", function=_TotalEnergy,
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _NumberDensity(field, data):
     # We can assume that we at least have Density
     # We should actually be guaranteeing the presence of a .shape attribute,
     # but I am not currently implementing that
-    fieldData = na.zeros(data["Density"].shape,
+    fieldData = np.zeros(data["Density"].shape,
                          dtype = data["Density"].dtype)
     if data.pf["MultiSpecies"] == 0:
         if data.has_field_parameter("mu"):
@@ -243,7 +249,7 @@
 KnownEnzoFields["z-velocity"].projection_conversion='1'
 
 def _convertBfield(data): 
-    return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
+    return np.sqrt(4*np.pi*data.convert("Density")*data.convert("x-velocity")**2)
 for field in ['Bx','By','Bz']:
     f = KnownEnzoFields[field]
     f._convert_function=_convertBfield
@@ -318,39 +324,39 @@
     f.take_log = False
 
 def _spdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
     filter = data['creation_time'] > 0.0
     if not filter.any(): return blank
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                           data["particle_position_y"][filter].astype(na.float64),
-                           data["particle_position_z"][filter].astype(na.float64),
-                           data["particle_mass"][filter].astype(na.float32),
-                           na.int64(na.where(filter)[0].size),
-                           blank, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                           data["particle_position_y"][filter].astype(np.float64),
+                           data["particle_position_z"][filter].astype(np.float64),
+                           data["particle_mass"][filter].astype(np.float32),
+                           np.int64(np.where(filter)[0].size),
+                           blank, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     return blank
 add_field("star_density", function=_spdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
 
 def _dmpdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
     if 'creation_time' in data.pf.field_info:
         filter = data['creation_time'] <= 0.0
         if not filter.any(): return blank
     else:
-        filter = na.ones(data.NumberOfParticles, dtype='bool')
+        filter = np.ones(data.NumberOfParticles, dtype='bool')
     if not filter.any(): return blank
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                           data["particle_position_y"][filter].astype(na.float64),
-                           data["particle_position_z"][filter].astype(na.float64),
-                           data["particle_mass"][filter].astype(na.float32),
-                           na.int64(na.where(filter)[0].size),
-                           blank, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                           data["particle_position_y"][filter].astype(np.float64),
+                           data["particle_position_z"][filter].astype(np.float64),
+                           data["particle_mass"][filter].astype(np.float32),
+                           np.int64(np.where(filter)[0].size),
+                           blank, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     return blank
 add_field("dm_density", function=_dmpdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
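
Both deposition fields lean on CICDeposit_3, which spreads each particle's
contribution over the neighboring cells with cloud-in-cell weights.  A minimal
one-dimensional NumPy sketch of the same scheme (illustrative only, not the
compiled amr_utils kernel):

    import numpy as np

    def cic_deposit_1d(pos, mass, left_edge, n_cells, dx):
        field = np.zeros(n_cells)
        x = (pos - left_edge) / dx - 0.5     # position in cell-center units
        i = np.floor(x).astype(int)
        w = x - i                            # fraction assigned to cell i + 1
        for k in range(len(pos)):
            if 0 <= i[k] < n_cells:
                field[i[k]] += mass[k] * (1.0 - w[k])
            if 0 <= i[k] + 1 < n_cells:
                field[i[k] + 1] += mass[k] * w[k]
        return field
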
@@ -361,28 +367,28 @@
     using cloud-in-cell deposit.
     """
     particle_field = field.name[4:]
-    top = na.zeros(data.ActiveDimensions, dtype='float32')
+    top = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return top
     particle_field_data = data[particle_field] * data['particle_mass']
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                           data["particle_position_y"].astype(na.float64),
-                           data["particle_position_z"].astype(na.float64),
-                           particle_field_data.astype(na.float32),
-                           na.int64(data.NumberOfParticles),
-                           top, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                           data["particle_position_y"].astype(np.float64),
+                           data["particle_position_z"].astype(np.float64),
+                           particle_field_data.astype(np.float32),
+                           np.int64(data.NumberOfParticles),
+                           top, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     del particle_field_data
 
-    bottom = na.zeros(data.ActiveDimensions, dtype='float32')
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                           data["particle_position_y"].astype(na.float64),
-                           data["particle_position_z"].astype(na.float64),
-                           data["particle_mass"].astype(na.float32),
-                           na.int64(data.NumberOfParticles),
-                           bottom, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    bottom = np.zeros(data.ActiveDimensions, dtype='float32')
+    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                           data["particle_position_y"].astype(np.float64),
+                           data["particle_position_z"].astype(np.float64),
+                           data["particle_mass"].astype(np.float32),
+                           np.int64(data.NumberOfParticles),
+                           bottom, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     top[bottom == 0] = 0.0
     bnz = bottom.nonzero()
     top[bnz] /= bottom[bnz]
@@ -400,30 +406,30 @@
     Create a grid field for star quantities, weighted by star mass.
     """
     particle_field = field.name[5:]
-    top = na.zeros(data.ActiveDimensions, dtype='float32')
+    top = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return top
     filter = data['creation_time'] > 0.0
     if not filter.any(): return top
     particle_field_data = data[particle_field][filter] * data['particle_mass'][filter]
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                          data["particle_position_y"][filter].astype(na.float64),
-                          data["particle_position_z"][filter].astype(na.float64),
-                          particle_field_data.astype(na.float32),
-                          na.int64(na.where(filter)[0].size),
-                          top, na.array(data.LeftEdge).astype(na.float64),
-                          na.array(data.ActiveDimensions).astype(na.int32), 
-                          na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                          data["particle_position_y"][filter].astype(np.float64),
+                          data["particle_position_z"][filter].astype(np.float64),
+                          particle_field_data.astype(np.float32),
+                          np.int64(np.where(filter)[0].size),
+                          top, np.array(data.LeftEdge).astype(np.float64),
+                          np.array(data.ActiveDimensions).astype(np.int32), 
+                          np.float64(data['dx']))
     del particle_field_data
 
-    bottom = na.zeros(data.ActiveDimensions, dtype='float32')
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                          data["particle_position_y"][filter].astype(na.float64),
-                          data["particle_position_z"][filter].astype(na.float64),
-                          data["particle_mass"][filter].astype(na.float32),
-                          na.int64(na.where(filter)[0].size),
-                          bottom, na.array(data.LeftEdge).astype(na.float64),
-                          na.array(data.ActiveDimensions).astype(na.int32), 
-                          na.float64(data['dx']))
+    bottom = np.zeros(data.ActiveDimensions, dtype='float32')
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                          data["particle_position_y"][filter].astype(np.float64),
+                          data["particle_position_z"][filter].astype(np.float64),
+                          data["particle_mass"][filter].astype(np.float32),
+                          np.int64(np.where(filter)[0].size),
+                          bottom, np.array(data.LeftEdge).astype(np.float64),
+                          np.array(data.ActiveDimensions).astype(np.int32), 
+                          np.float64(data['dx']))
     top[bottom == 0] = 0.0
     bnz = bottom.nonzero()
     top[bnz] /= bottom[bnz]
@@ -460,7 +466,7 @@
           projection_conversion="1")
 
 def _StarAge(field, data):
-    star_age = na.zeros(data['StarCreationTimeYears'].shape)
+    star_age = np.zeros(data['StarCreationTimeYears'].shape)
     with_stars = data['StarCreationTimeYears'] > 0
     star_age[with_stars] = data.pf.time_units['years'] * \
         data.pf.current_time - \
@@ -479,9 +485,9 @@
 def _Bmag(field, data):
     """ magnitude of bvec
     """
-    return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
+    return np.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
 
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\rm{Gauss}")
+add_field("Bmag", function=_Bmag,display_name=r"$|B|$",units=r"\rm{Gauss}")
 
 # Particle functions
 
@@ -489,7 +495,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         try:
             return io._read_data_set(data, p_field).astype(dtype)
         except io._read_exception:
@@ -549,13 +555,13 @@
 def _convertParticleMass(data):
     return data.convert("Density")*(data.convert("cm")**3.0)
 def _IOLevelParticleMass(grid):
-    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
+    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
     cf = (_ParticleMass(None, dd) * _convertParticleMass(grid))[0]
     return cf
 def _convertParticleMassMsun(data):
     return data.convert("Density")*((data.convert("cm")**3.0)/1.989e33)
 def _IOLevelParticleMassMsun(grid):
-    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
+    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
     cf = (_ParticleMass(None, dd) * _convertParticleMassMsun(grid))[0]
     return cf
 add_field("ParticleMass",
@@ -578,7 +584,7 @@
     if data['dx'].size == 1:
         try:
             return data['dx']*data['dy']*\
-                na.ones(data.ActiveDimensions, dtype='float64')
+                np.ones(data.ActiveDimensions, dtype='float64')
         except AttributeError:
             return data['dx']*data['dy']
     return data["dx"]*data["dy"]
@@ -600,11 +606,10 @@
         Enzo2DFieldInfo["CellArea%s" % a]
 
 def _zvel(field, data):
-    return na.zeros(data["x-velocity"].shape,
+    return np.zeros(data["x-velocity"].shape,
                     dtype='float64')
 add_enzo_2d_field("z-velocity", function=_zvel)
 
-
 #
 # Now we do overrides for 1D fields
 #
@@ -632,7 +637,7 @@
         Enzo1DFieldInfo["CellLength%s" % a]
 
 def _yvel(field, data):
-    return na.zeros(data["x-velocity"].shape,
+    return np.zeros(data["x-velocity"].shape,
                     dtype='float64')
 add_enzo_1d_field("z-velocity", function=_zvel)
 add_enzo_1d_field("y-velocity", function=_yvel)


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -32,7 +32,7 @@
 from yt.utilities.logger import ytLogger as mylog
 import h5py
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 
 class IOHandlerPackedHDF5(BaseIOHandler):
@@ -72,7 +72,7 @@
         read_fields = fields[:]
         for field in fields:
             # TODO: figure out dataset types
-            rv[field] = na.empty(size, dtype='float64')
+            rv[field] = np.empty(size, dtype='float64')
         for pfield in pfields:
             if pfield not in fields: read_fields.append(pfield)
         ind = 0
@@ -118,7 +118,7 @@
         read_fields = fields[:]
         for field in fields:
             # TODO: figure out dataset types
-            rv[field] = na.empty(size, dtype='float64')
+            rv[field] = np.empty(size, dtype='float64')
         for pfield in pfields:
             if pfield not in fields: read_fields.append(pfield)
         ng = sum(len(c.objs) for c in chunks)
@@ -149,7 +149,7 @@
         for field in fields:
             ftype, fname = field
             fsize = size
-            rv[field] = na.empty(fsize, dtype="float64")
+            rv[field] = np.empty(fsize, dtype="float64")
         ind = 0
         ng = sum(len(c.objs) for c in chunks)
         mylog.debug("Reading %s cells of %s fields in %s grids",


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -25,7 +25,7 @@
 
 from yt.funcs import *
 
-import numpy as na
+import numpy as np
 import glob
 import os
 
@@ -236,8 +236,8 @@
             else:
                 my_final_time = self.final_time
 
-            my_times = na.array(map(lambda a:a['time'], my_all_outputs))
-            my_indices = na.digitize([my_initial_time, my_final_time], my_times)
+            my_times = np.array(map(lambda a:a['time'], my_all_outputs))
+            my_indices = np.digitize([my_initial_time, my_final_time], my_times)
             if my_initial_time == my_times[my_indices[0] - 1]: my_indices[0] -= 1
             my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]
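
np.digitize returns, for each query, the index of the first tabulated time
strictly greater than it, so the two indices bracket the requested window;
the extra "-= 1" widens the slice when the initial time coincides with a
stored output.  For example:

    import numpy as np
    my_times = np.array([0., 1., 2., 3., 4.])
    lo, hi = np.digitize([1.0, 3.5], my_times)   # lo == 2, hi == 4
    # my_times[lo - 1] == 1.0, so lo is decremented and outputs[1:4] is taken
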
 
@@ -294,7 +294,7 @@
             elif len(vals) == 1:
                 vals = pcast(vals[0])
             else:
-                vals = na.array([pcast(i) for i in vals if i != "-99999"])
+                vals = np.array([pcast(i) for i in vals if i != "-99999"])
             self.parameters[param] = vals
         self.refine_by = self.parameters["RefineBy"]
         self.dimensionality = self.parameters["TopGridRank"]
@@ -303,17 +303,17 @@
             if len(self.domain_dimensions) < 3:
                 tmp = self.domain_dimensions.tolist()
                 tmp.append(1)
-                self.domain_dimensions = na.array(tmp)
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                self.domain_dimensions = np.array(tmp)
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64").copy()
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64").copy()
         else:
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64")
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64")
-            self.domain_dimensions = na.array([self.parameters["TopGridDimensions"],1,1])
+            self.domain_dimensions = np.array([self.parameters["TopGridDimensions"],1,1])
 
         if self.parameters["ComovingCoordinates"]:
             cosmo_attr = {'box_size': 'CosmologyComovingBoxSize',
@@ -374,7 +374,7 @@
                     current_time * self.enzo_cosmology.TimeUnits)
 
             self.all_time_outputs.append(output)
-            if na.abs(self.final_time - current_time) / self.final_time < 1e-4: break
+            if np.abs(self.final_time - current_time) / self.final_time < 1e-4: break
             current_time += self.parameters['dtDataDump']
             index += 1
 
@@ -476,8 +476,8 @@
         self.parameters['RedshiftDumpDir'] = "RD"
         self.parameters['ComovingCoordinates'] = 0
         self.parameters['TopGridRank'] = 3
-        self.parameters['DomainLeftEdge'] = na.zeros(self.parameters['TopGridRank'])
-        self.parameters['DomainRightEdge'] = na.ones(self.parameters['TopGridRank'])
+        self.parameters['DomainLeftEdge'] = np.zeros(self.parameters['TopGridRank'])
+        self.parameters['DomainRightEdge'] = np.ones(self.parameters['TopGridRank'])
         self.parameters['Refineby'] = 2 # technically not the enzo default
         self.parameters['StopCycle'] = 100000
         self.parameters['dtDataDump'] = 0.
@@ -491,7 +491,7 @@
 
         self.time_units = {}
         if self.cosmological_simulation:
-            self.parameters['TimeUnits'] = 2.52e17 / na.sqrt(self.omega_matter) \
+            self.parameters['TimeUnits'] = 2.52e17 / np.sqrt(self.omega_matter) \
                 / self.hubble_constant / (1 + self.initial_redshift)**1.5
         self.time_units['1'] = 1.
         self.time_units['seconds'] = self.parameters['TimeUnits']
@@ -586,8 +586,8 @@
             outputs = self.all_outputs
         my_outputs = []
         for value in values:
-            outputs.sort(key=lambda obj:na.fabs(value - obj[key]))
-            if (tolerance is None or na.abs(value - outputs[0][key]) <= tolerance) \
+            outputs.sort(key=lambda obj:np.fabs(value - obj[key]))
+            if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \
                     and outputs[0] not in my_outputs:
                 my_outputs.append(outputs[0])
             else:
@@ -649,7 +649,7 @@
 
         """
 
-        times = na.array(times) / self.time_units[time_units]
+        times = np.array(times) / self.time_units[time_units]
         return self._get_outputs_by_key('time', times, tolerance=tolerance,
                                         outputs=outputs)
 


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/enzo/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -0,0 +1,50 @@
+"""
+Enzo frontend tests using moving7
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    standard_patch_amr, \
+    data_dir_load
+from yt.frontends.enzo.api import EnzoStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
+           "particle_density")
+
+m7 = "DD0010/moving7_0010"
+@requires_pf(m7)
+def test_moving7():
+    pf = data_dir_load(m7)
+    yield assert_equal, str(pf), "moving7_0010"
+    for test in standard_patch_amr(m7, _fields):
+        yield test
+
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+@requires_pf(g30)
+def test_galaxy0030():
+    pf = data_dir_load(g30)
+    yield assert_equal, str(pf), "galaxy0030"
+    for test in standard_patch_amr(g30, _fields):
+        yield test
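
These are nose-style generator tests: each yielded tuple becomes its own test
case, and (assuming the framework behaves like the rest of yt's answer
testing) requires_pf skips the whole generator when the named dataset is
absent.  The pattern in miniature:

    from numpy.testing import assert_equal

    def test_example():
        for value in (1, 2, 3):
            yield assert_equal, value, value   # collected as three tests
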


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -25,7 +25,7 @@
 
 import h5py
 import stat
-import numpy as na
+import numpy as np
 import weakref
 
 from yt.config import ytcfg
@@ -42,11 +42,11 @@
     mpc_conversion, sec_conversion
 from yt.utilities.io_handler import \
     io_registry
-
+from yt.utilities.physical_constants import cm_per_mpc
 from .fields import FLASHFieldInfo, add_flash_field, KnownFLASHFields, \
     CylindricalFLASHFieldInfo, PolarFLASHFieldInfo
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc, \
-     ValidateDataField
+     ValidateDataField, TranslationFunc
 
 class FLASHGrid(AMRGridPatch):
     _id_offset = 1
@@ -78,7 +78,7 @@
         self.directory = os.path.dirname(self.hierarchy_filename)
         self._handle = pf._handle
 
-        self.float_type = na.float64
+        self.float_type = np.float64
         GridGeometryHandler.__init__(self,pf,data_style)
 
     def _initialize_data_storage(self):
@@ -131,36 +131,39 @@
             self.grid_particle_count[:] = f["/localnp"][:][:,None]
         except KeyError:
             self.grid_particle_count[:] = 0.0
-        self._particle_indices = na.zeros(self.num_grids + 1, dtype='int64')
-        na.add.accumulate(self.grid_particle_count.squeeze(),
+        self._particle_indices = np.zeros(self.num_grids + 1, dtype='int64')
+        np.add.accumulate(self.grid_particle_count.squeeze(),
                           out=self._particle_indices[1:])
         # This will become redundant, as _prepare_grid will reset it to its
         # current value.  Note that FLASH uses 1-based indexing for refinement
         # levels, but we do not, so we reduce the level by 1.
         self.grid_levels.flat[:] = f["/refine level"][:][:] - 1
-        self.grids = na.empty(self.num_grids, dtype='object')
+        self.grids = np.empty(self.num_grids, dtype='object')
         for i in xrange(self.num_grids):
             self.grids[i] = self.grid(i+1, self, self.grid_levels[i,0])
         
 
         # This is a possibly slow and verbose fix, and should be re-examined!
-        rdx = (self.parameter_file.domain_right_edge -
-                self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+        rdx = (self.parameter_file.domain_width /
+                self.parameter_file.domain_dimensions)
         nlevels = self.grid_levels.max()
-        dxs = na.zeros((nlevels+1,3),dtype='float64')
+        dxs = np.ones((nlevels+1,3),dtype='float64')
         for i in range(nlevels+1):
-            dxs[i] = rdx/self.parameter_file.refine_by**i
+            dxs[i,:ND] = rdx[:ND]/self.parameter_file.refine_by**i
        
+        if ND < 3:
+            dxs[:,ND:] = rdx[ND:]
+
         for i in xrange(self.num_grids):
             dx = dxs[self.grid_levels[i],:]
-            self.grid_left_edge[i] = na.rint(self.grid_left_edge[i]/dx)*dx
-            self.grid_right_edge[i] = na.rint(self.grid_right_edge[i]/dx)*dx
+            self.grid_left_edge[i][:ND] = np.rint(self.grid_left_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
+            self.grid_right_edge[i][:ND] = np.rint(self.grid_right_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
                         
     def _populate_grid_objects(self):
         # We only handle 3D data, so offset is 7 (nfaces+1)
         
         offset = 7
-        ii = na.argsort(self.grid_levels.flat)
+        ii = np.argsort(self.grid_levels.flat)
         gid = self._handle["/gid"][:]
         first_ind = -(self.parameter_file.refine_by**self.parameter_file.dimensionality)
         for g in self.grids[ii].flat:
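
The re-snapping of grid edges rounds each value to the nearest integer
multiple of that level's cell width, scrubbing floating-point drift from the
file without moving any edge by more than half a cell:

    import numpy as np
    dx = 1.0 / 64.0
    edge = 0.4843749999999997          # should be exactly 31 * dx
    snapped = np.rint(edge / dx) * dx  # == 0.484375
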
@@ -192,11 +195,16 @@
                 self.derived_field_list.append(field)
             if (field not in KnownFLASHFields and
                 field.startswith("particle")) :
-                self.parameter_file.field_info.add_field(field,
-                                                         function=NullFunc,
-                                                         take_log=False,
-                                                         validators = [ValidateDataField(field)],
-                                                         particle_type=True)
+                self.parameter_file.field_info.add_field(
+                        field, function=NullFunc, take_log=False,
+                        validators = [ValidateDataField(field)],
+                        particle_type=True)
+
+        for field in self.derived_field_list:
+            f = self.parameter_file.field_info[field]
+            if f._function.func_name == "_TranslationFunc":
+                # Translating an already-converted field
+                self.parameter_file.conversion_factors[field] = 1.0 
                 
     def _setup_data_io(self):
         self.io = io_registry[self.data_style](self.parameter_file)
@@ -218,6 +226,7 @@
                  storage_filename = None,
                  conversion_override = None):
 
+        if self._handle is not None: return
         self._handle = h5py.File(filename, "r")
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override
@@ -244,13 +253,13 @@
         self.conversion_factors = defaultdict(lambda: 1.0)
         if "EOSType" not in self.parameters:
             self.parameters["EOSType"] = -1
-        if self.cosmological_simulation == 1:
-            self._setup_comoving_units()
         if "pc_unitsbase" in self.parameters:
             if self.parameters["pc_unitsbase"] == "CGS":
                 self._setup_cgs_units()
         else:
             self._setup_nounits_units()
+        if self.cosmological_simulation == 1:
+            self._setup_comoving_units()
         self.time_units['1'] = 1
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0 / \
@@ -267,10 +276,10 @@
         self.conversion_factors['eint'] = (1.0 + self.current_redshift)**-2.0
         self.conversion_factors['ener'] = (1.0 + self.current_redshift)**-2.0
         self.conversion_factors['temp'] = (1.0 + self.current_redshift)**-2.0
-        self.conversion_factors['velx'] = (1.0 + self.current_redshift)
+        self.conversion_factors['velx'] = (1.0 + self.current_redshift)**-1.0
         self.conversion_factors['vely'] = self.conversion_factors['velx']
         self.conversion_factors['velz'] = self.conversion_factors['velx']
-        self.conversion_factors['particle_velx'] = (1.0 + self.current_redshift)
+        self.conversion_factors['particle_velx'] = (1.0 + self.current_redshift)**-1.0
         self.conversion_factors['particle_vely'] = \
             self.conversion_factors['particle_velx']
         self.conversion_factors['particle_velz'] = \
@@ -280,7 +289,8 @@
             self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
-
+            self.units[unit] /= (1.0+self.current_redshift)
+            
     def _setup_cgs_units(self):
         self.conversion_factors['dens'] = 1.0
         self.conversion_factors['pres'] = 1.0
@@ -378,9 +388,9 @@
                     if vn in self.parameters and self.parameters[vn] != pval:
                         mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn))
                     self.parameters[vn] = pval
-        self.domain_left_edge = na.array(
+        self.domain_left_edge = np.array(
             [self.parameters["%smin" % ax] for ax in 'xyz']).astype("float64")
-        self.domain_right_edge = na.array(
+        self.domain_right_edge = np.array(
             [self.parameters["%smax" % ax] for ax in 'xyz']).astype("float64")
         self.min_level = self.parameters.get("lrefine_min", 1) - 1
 
@@ -417,7 +427,7 @@
         nblocky = self.parameters["nblocky"]
         nblockz = self.parameters["nblockz"]
         self.domain_dimensions = \
-            na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
+            np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
         try:
             self.parameters["Gamma"] = self.parameters["gamma"]
         except:
@@ -433,6 +443,7 @@
             self.omega_lambda = self.parameters['cosmologicalconstant']
             self.omega_matter = self.parameters['omegamatter']
             self.hubble_constant = self.parameters['hubbleconstant']
+            self.hubble_constant *= cm_per_mpc * 1.0e-5 * 1.0e-2 # convert to 'h'
         except:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
@@ -443,7 +454,7 @@
     def _setup_cylindrical_coordinates(self):
         if self.dimensionality == 2:
             self.domain_left_edge[2] = 0.0
-            self.domain_right_edge[2] = 2.0 * na.pi
+            self.domain_right_edge[2] = 2.0 * np.pi
 
     def _setup_polar_coordinates(self):
         pass
@@ -474,3 +485,5 @@
         except:
             pass
         return False
+
+
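
A quick standalone sketch of the comoving-units arithmetic changed above,
with made-up values (plain NumPy, not yt API): FLASH stores comoving
velocities, so the proper velocity carries a factor of (1+z)**-1 rather
than (1+z), and a Hubble constant read from the file in 1/s is rescaled
to 'h', i.e. units of 100 km/s/Mpc.

    import numpy as np

    z = 2.0
    v_code = np.array([100.0, 0.0, 0.0])   # comoving velocity, example values
    v_proper = v_code * (1.0 + z)**-1.0    # matches the corrected exponent

    cm_per_mpc = 3.0857e24                 # cm per megaparsec
    H_file = 2.3e-18                       # example value, in 1/s
    h = H_file * cm_per_mpc * 1.0e-5 * 1.0e-2   # 1/s -> km/s/Mpc -> h ~ 0.71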


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -36,7 +36,7 @@
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
     kboltz
-import numpy as na
+import numpy as np
 from yt.utilities.exceptions import *
 KnownFLASHFields = FieldInfoContainer()
 add_flash_field = KnownFLASHFields.add_field
@@ -105,7 +105,10 @@
     if fn1.endswith("_Fraction"):
         add_field(fn1.split("_")[0] + "_Density",
                   function=_get_density(fn1), take_log=True,
-                  display_name="%s\/Density" % fn1.split("_")[0])
+                  display_name="%s\/Density" % fn1.split("_")[0],
+                  units = r"\rm{g}/\rm{cm}^3",
+                  projected_units = r"\rm{g}/\rm{cm}^2",
+                  )
 
 def _get_convert(fname):
     def _conv(data):
@@ -114,7 +117,8 @@
 
 add_flash_field("dens", function=NullFunc, take_log=True,
                 convert_function=_get_convert("dens"),
-                units=r"\rm{g}/\rm{cm}^3")
+                units=r"\rm{g}/\rm{cm}^3",
+                projected_units = r"\rm{g}/\rm{cm}^2")
 add_flash_field("velx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("velx"),
                 units=r"\rm{cm}/\rm{s}")
@@ -241,7 +245,8 @@
 add_flash_field("targ", function=NullFunc, take_log=False,
                 display_name="Target Material Fraction")
 add_flash_field("sumy", function=NullFunc, take_log=False)
-add_flash_field("mgdc", function=NullFunc, take_log=False)
+add_flash_field("mgdc", function=NullFunc, take_log=False,
+                display_name="Emission Minus Absorption Diffusion Terms")
 
 for i in range(1, 1000):
     add_flash_field("r{0:03}".format(i), function=NullFunc, take_log=False,
@@ -263,6 +268,7 @@
     add_field(f, TranslationFunc(v),
               take_log=KnownFLASHFields[v].take_log,
               units = ff._units, display_name=dname,
+              projected_units = ff._projected_units,
               particle_type = pfield)
 
 def _convertParticleMassMsun(data):
@@ -315,6 +321,46 @@
 add_field("GasEnergy", function=_GasEnergy, 
           units=r"\rm{ergs}/\rm{g}")
 
+# See http://flash.uchicago.edu/pipermail/flash-users/2012-October/001180.html
+# along with the attachment to that e-mail for details
+def GetMagRescalingFactor(pf):
+    if pf['unitsystem'].lower() == "cgs":
+        factor = 1
+    elif pf['unitsystem'].lower() == "si":
+        factor = np.sqrt(4*np.pi/1e7)
+    elif pf['unitsystem'].lower() == "none":
+        factor = np.sqrt(4*np.pi)
+    else:
+        raise RuntimeError("Runtime parameter unitsystem with "
+                           "value %s is unrecognized" % pf['unitsystem'])
+    return factor
+
+def _Bx(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magx']*factor
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magy']*factor
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magz']*factor
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
+def _DivB(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['divb']*factor
+add_field("DivB", function=_DivB, take_log=False,
+          units=r"\rm{Gauss}\/\rm{cm}^{-1}")
+
+
+
 def _unknown_coord(field, data):
     raise YTCoordinateNotImplemented
 add_cyl_field("dx", function=_unknown_coord)
@@ -323,40 +369,40 @@
 add_cyl_field("y", function=_unknown_coord)
 
 def _dr(field, data):
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
 add_cyl_field('dr', function=_dr, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _dz(field, data):
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
 add_cyl_field('dz', function=_dz,
           display_field=False, validators=[ValidateSpatial(0)])
 
 def _dtheta(field, data):
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
 add_cyl_field('dtheta', function=_dtheta,
           display_field=False, validators=[ValidateSpatial(0)])
 
 def _coordR(field, data):
     dim = data.ActiveDimensions[0]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[0])[:,None,None]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[0])[:,None,None]
             +0.5) * data['dr'] + data.LeftEdge[0]
 add_cyl_field('r', function=_coordR, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _coordZ(field, data):
     dim = data.ActiveDimensions[1]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[1])[None,:,None]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[1])[None,:,None]
             +0.5) * data['dz'] + data.LeftEdge[1]
 add_cyl_field('z', function=_coordZ, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _coordTheta(field, data):
     dim = data.ActiveDimensions[2]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[2])[None,None,:]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[2])[None,None,:]
             +0.5) * data['dtheta'] + data.LeftEdge[2]
 add_cyl_field('theta', function=_coordTheta, display_field=False,
           validators=[ValidateSpatial(0)])
@@ -373,27 +419,27 @@
 add_pol_field("y", function=_unknown_coord)
 
 def _dr(field, data):
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
 add_pol_field('dr', function=_dr, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _dtheta(field, data):
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
 add_pol_field('dtheta', function=_dtheta,
           display_field=False, validators=[ValidateSpatial(0)])
 
 def _coordR(field, data):
     dim = data.ActiveDimensions[0]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[0])[:,None,None]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[0])[:,None,None]
             +0.5) * data['dr'] + data.LeftEdge[0]
 add_pol_field('r', function=_coordR, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _coordTheta(field, data):
     dim = data.ActiveDimensions[1]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[1])[None,:,None]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[1])[None,:,None]
             +0.5) * data['dtheta'] + data.LeftEdge[1]
 add_pol_field('theta', function=_coordTheta, display_field=False,
           validators=[ValidateSpatial(0)])
@@ -406,12 +452,12 @@
 ## Derived FLASH Fields
 def _nele(field, data):
     return data['ye'] * data['dens'] * data['sumy'] * 6.022E23
-add_field('nele', function=_nele, take_log=True, units=r"\rm{n}/\rm{cm}^3")
-add_field('edens', function=_nele, take_log=True, units=r"\rm{n}/\rm{cm}^3")
+add_field('nele', function=_nele, take_log=True, units=r"\rm{cm}^{-3}")
+add_field('edens', function=_nele, take_log=True, units=r"\rm{cm}^{-3}")
 
 def _nion(field, data):
     return data['dens'] * data['sumy'] * 6.022E23
-add_field('nion', function=_nion, take_log=True, units=r"\rm{n}/\rm{cm}^3")
+add_field('nion', function=_nion, take_log=True, units=r"\rm{cm}^{-3}")
 
 
 def _abar(field, data):
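
The magnetic fields added above all go through GetMagRescalingFactor; as a
standalone sketch (illustrative values, plain NumPy rather than yt API),
the factor applied to magx/magy/magz to obtain Gauss is one of three
constants keyed on the 'unitsystem' runtime parameter; see the linked
flash-users thread for where the constants come from:

    import numpy as np

    factors = {
        "cgs":  1.0,
        "si":   np.sqrt(4*np.pi/1e7),
        "none": np.sqrt(4*np.pi),
    }

    magx = 1.0e-6                       # example stored value
    Bx_gauss = magx * factors["none"]   # ~3.545e-6 Gauss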


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import h5py
 
 from yt.utilities.io_handler import \
@@ -52,27 +52,10 @@
             count_list, conv_factors):
         pass
 
-    def _select_particles(self, grid, field):
-        f = self._handle
-        npart = f["/tracer particles"].shape[0]
-        total_selected = 0
-        start = 0
-        stride = 1e6
-        blki = self._particle_fields["particle_blk"]
-        bi = grid.id - grid._id_offset
-        fi = self._particle_fields[field]
-        tr = []
-        while start < npart:
-            end = min(start + stride - 1, npart)
-            gi = f["/tracer particles"][start:end,blki] == bi
-            tr.append(f["/tracer particles"][gi,fi])
-            start = end
-        return na.concatenate(tr)
-
     def _read_data_set(self, grid, field):
         f = self._handle
         if field in self._particle_fields:
-            if grid.NumberOfParticles == 0: return na.array([], dtype='float64')
+            if grid.NumberOfParticles == 0: return np.array([], dtype='float64')
             start = self.pf.h._particle_indices[grid.id - grid._id_offset]
             end = self.pf.h._particle_indices[grid.id - grid._id_offset + 1]
             fi = self._particle_fields[field]
@@ -96,7 +79,7 @@
         rv = {}
         for field in fields:
             ftype, fname = field
-            rv[field] = na.empty(size, dtype=f["/%s" % fname].dtype)
+            rv[field] = np.empty(size, dtype=f["/%s" % fname].dtype)
         ng = sum(len(c.objs) for c in chunks)
         mylog.debug("Reading %s cells of %s fields in %s blocks",
                     size, [f2 for f1, f2 in fields], ng)
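
The removed _select_particles scanned all of /tracer particles in strides;
_read_data_set now assumes particles are pre-sorted by block, so a grid's
particles form a contiguous slab bounded by the hierarchy's precomputed
indices.  A minimal sketch of that lookup, with illustrative names and
stand-in data (not the FLASH IO API):

    import numpy as np

    particle_indices = np.array([0, 3, 3, 7])   # cumulative counts per grid
    particles = np.arange(14.0).reshape(7, 2)   # (npart, nfields) stand-in

    def read_grid_particles(grid_id, field_index):
        start = particle_indices[grid_id]
        end = particle_indices[grid_id + 1]
        return particles[start:end, field_index]

    read_grid_particles(0, 1)   # three particles in grid 0; grid 1 is empty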




diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -26,7 +26,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 from itertools import izip
 
 from yt.funcs import *
@@ -104,7 +104,7 @@
         
     def _parse_hierarchy(self):
         f = self._handle # shortcut
-        npa = na.array
+        npa = np.array
         DLE = self.parameter_file.domain_left_edge
         DRE = self.parameter_file.domain_right_edge
         DW = (DRE - DLE)
@@ -119,12 +119,12 @@
                                 + dxs *(1 + self.grid_dimensions)
         self.grid_particle_count.flat[:] = f['/grid_particle_count'][:].astype("int32")
         grid_parent_id = f['/grid_parent_id'][:]
-        self.max_level = na.max(self.grid_levels)
+        self.max_level = np.max(self.grid_levels)
         
         args = izip(xrange(self.num_grids), self.grid_levels.flat,
                     grid_parent_id, LI,
                     self.grid_dimensions, self.grid_particle_count.flat)
-        self.grids = na.empty(len(args), dtype='object')
+        self.grids = np.empty(self.num_grids, dtype='object')  # izip has no len()
         for gi, (j,lvl,p, le, d, n) in enumerate(args):
             self.grids[gi] = self.grid(self,j,d,le,lvl,p,n)
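
Note that izip returns a lazy iterator with no len(), which is why the
object array above is sized from self.num_grids.  A tiny standalone sketch
of the pattern (stand-in values, not the Gadget frontend API): grids go
into an object-dtype array so they can be fancy-indexed like numeric
arrays.

    import numpy as np
    from itertools import izip   # Python 2, matching the file above

    num_grids = 2
    dims = [(8, 8, 8), (16, 16, 16)]
    levels = [0, 1]

    grids = np.empty(num_grids, dtype='object')
    for gi, (d, lvl) in enumerate(izip(dims, levels)):
        grids[gi] = {"dims": d, "level": lvl}   # stand-in for self.grid(...)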
         


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import \


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ b/yt/frontends/gadget/io.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -38,9 +38,9 @@
             address = '/data/grid_%010i/particles/%s/%s' % (grid.id, ptype, field)
             data.append(fh[address][:])
         if len(data) > 0:
-            data = na.concatenate(data)
+            data = np.concatenate(data)
         fh.close()
-        return na.array(data)
+        return np.array(data)
     def _read_field_names(self,grid): 
         adr = grid.Address
         fh = h5py.File(grid.filename,mode='r')


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -28,7 +28,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 import weakref
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
@@ -37,6 +37,8 @@
            GridGeometryHandler
 from yt.data_objects.static_output import \
            StaticOutput
+from yt.utilities.lib import \
+    get_box_grids_level
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 
@@ -71,7 +73,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -108,11 +110,11 @@
     def _parse_hierarchy(self):
         f = self._fhandle
         dxs = []
-        self.grids = na.empty(self.num_grids, dtype='object')
+        self.grids = np.empty(self.num_grids, dtype='object')
         levels = (f['grid_level'][:]).copy()
         glis = (f['grid_left_index'][:]).copy()
         gdims = (f['grid_dimensions'][:]).copy()
-        active_dims = ~((na.max(gdims, axis=0) == 1) &
+        active_dims = ~((np.max(gdims, axis=0) == 1) &
                         (self.parameter_file.domain_dimensions == 1))
 
         for i in range(levels.shape[0]):
@@ -125,7 +127,7 @@
                   self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
             dx[active_dims] = dx[active_dims]/self.parameter_file.refine_by**(levels[i])
             dxs.append(dx)
-        dx = na.array(dxs)
+        dx = np.array(dxs)
         self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
         self.grid_dimensions = gdims.astype("int32")
         self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
@@ -133,21 +135,32 @@
         del levels, glis, gdims
 
     def _populate_grid_objects(self):
-        for g in self.grids:
+        mask = np.empty(self.grids.size, dtype='int32')
+        for gi, g in enumerate(self.grids):
             g._prepare_grid()
             g._setup_dx()
 
-        for g in self.grids:
+        for gi, g in enumerate(self.grids):
             g.Children = self._get_grid_children(g)
             for g1 in g.Children:
                 g1.Parent.append(g)
+            get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                self.grid_levels[gi],
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            m = mask.astype("bool")
+            m[gi] = False
+            siblings = self.grids[gi:][m[gi:]]
+            if len(siblings) > 0:
+                g.OverlappingSiblings = siblings.tolist()
         self.max_level = self.grid_levels.max()
 
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
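
The sibling pass added above asks get_box_grids_level for every same-level
grid whose box overlaps grid gi, drops gi itself, and keeps only grids at
positions >= gi so each overlapping pair is recorded once.  A pure-NumPy
sketch of the overlap test, with illustrative edges:

    import numpy as np

    left   = np.array([[0.0, 0.0, 0.0], [0.5, 0.0, 0.0], [0.0, 0.5, 0.0]])
    right  = left + 0.6
    levels = np.array([0, 0, 0])

    gi = 0
    overlap = np.all((left < right[gi]) & (right > left[gi]), axis=1)
    mask = overlap & (levels == levels[gi])
    mask[gi] = False                   # a grid is not its own sibling
    siblings = np.where(mask)[0]       # -> array([1, 2])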


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -28,7 +28,7 @@
 import re
 import os
 import weakref
-import numpy as na
+import numpy as np
 
 from collections import \
     defaultdict
@@ -102,17 +102,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
@@ -170,9 +164,9 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
         self.refinementFactor_unnecessary = self.__global_header_lines[counter].split()
         counter += 1
@@ -181,9 +175,9 @@
         counter += 1 # unused line in Maestro BoxLib
         
         counter += 1
-        self.dx = na.zeros((self.n_levels,3))
+        self.dx = np.zeros((self.n_levels,3))
         for i,line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float,line.split()))
+            self.dx[i] = np.array(map(float,line.split()))
 
         counter += self.n_levels # unused line in Maestro BoxLib
         
@@ -259,8 +253,8 @@
                 counter+=1
                 zlo,zhi = map(float,self.__global_header_lines[counter].split())
                 counter+=1
-                lo = na.array([xlo,ylo,zlo])
-                hi = na.array([xhi,yhi,zhi])
+                lo = np.array([xlo,ylo,zlo])
+                hi = np.array([xhi,yhi,zhi])
                 dims,start,stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo,hi,grid_counter,level,gfn, gfo, dims,start,stop,paranoia=paranoid_read,hierarchy=self))
                 grid_counter += 1 # this is global, and shouldn't be reset
@@ -304,17 +298,17 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self,start_stop):
-        start = na.array(map(int,start_stop[0].split(',')))
-        stop = na.array(map(int,start_stop[1].split(',')))
+        start = np.array(map(int,start_stop[0].split(',')))
+        stop = np.array(map(int,start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension,start,stop
         
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
-        self.grids = na.concatenate([level.grids for level in self.levels])
-        self.grid_levels = na.concatenate([level.ngrids*[level.level] for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
+        self.grid_levels = np.concatenate([level.ngrids*[level.level] for level in self.levels])
         self.grid_levels = self.grid_levels.reshape((self.num_grids,1))
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
         self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
         self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
@@ -325,9 +319,9 @@
             left_edges += [g.LeftEdge for g in level.grids]
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -354,10 +348,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -367,7 +361,7 @@
                 fd = self.field_info[field].get_dependencies(pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -381,11 +375,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -494,9 +488,9 @@
                 t = parameterTypes[paramName](val)
                 exec("self.%s = %s" % (paramName,t))
 
-        self.domain_dimensions = na.array([_n_cellx,_n_celly,_n_cellz])
-        self.domain_left_edge = na.array([_prob_lo_x,_prob_lo_y,_prob_lo_z])
-        self.domain_right_edge = na.array([_prob_hi_x,_prob_hi_y,_prob_hi_z])
+        self.domain_dimensions = np.array([_n_cellx,_n_celly,_n_cellz])
+        self.domain_left_edge = np.array([_prob_lo_x,_prob_lo_y,_prob_lo_z])
+        self.domain_right_edge = np.array([_prob_hi_x,_prob_hi_y,_prob_hi_z])
         
         self.cosmological_simulation = self.current_redshift = \
             self.omega_matter = self.omega_lambda = self.hubble_constant = 0
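
The _setup_dx rewrite above (mirrored in the nyx and orion frontends below)
stops recomputing cell widths from parent grids and edges; the hierarchy
already parsed per-level widths out of the BoxLib header into
grid_dxs/grid_dys/grid_dzs, and each grid simply reads its own row.  A
minimal sketch of that lookup with illustrative values:

    import numpy as np

    grid_dxs = np.array([[0.1], [0.05]])   # shape (num_grids, 1)
    grid_dys = np.array([[0.1], [0.05]])
    grid_dzs = np.array([[0.1], [0.05]])

    index = 1                              # the grid's hierarchy index
    dds = np.array([grid_dxs[index][0],
                    grid_dys[index][0],
                    grid_dzs[index][0]])   # -> array([0.05, 0.05, 0.05])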


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/maestro/io.py
--- a/yt/frontends/maestro/io.py
+++ b/yt/frontends/maestro/io.py
@@ -28,7 +28,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 
@@ -72,8 +72,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int,start.split(',')))
-            stop = na.array(map(int,stop.split(',')))
+            start = np.array(map(int,start.split(',')))
+            stop = np.array(map(int,stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -113,7 +113,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile,count=nElements,dtype=dtype)
+        field = np.fromfile(inFile,count=nElements,dtype=dtype)
         field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
 
         # we can/should also check against the max and min in the header file


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -35,7 +35,7 @@
 from string import strip, rstrip
 import weakref
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import AMRGridPatch
@@ -100,18 +100,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is. We don't assume that
-        # dx=dy=dz here.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE - LE) / self.ActiveDimensions)
-
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
@@ -172,20 +165,20 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.refinementFactor_unnecessary = self._global_header_lines[counter].split() #na.array(map(int, self._global_header_lines[counter].split()))
+        self.refinementFactor_unnecessary = self._global_header_lines[counter].split() #np.array(map(int, self._global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
         counter += 1
         self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
         counter += 1
 
-        self.dx = na.zeros((self.n_levels, 3))
+        self.dx = np.zeros((self.n_levels, 3))
         for i, line in enumerate(self._global_header_lines[counter:counter + self.n_levels]):
-            self.dx[i] = na.array(map(float, line.split()))
+            self.dx[i] = np.array(map(float, line.split()))
         counter += self.n_levels
         self.geometry = int(self._global_header_lines[counter])
         if self.geometry != 0:
@@ -269,8 +262,8 @@
                 counter += 1
                 zlo, zhi = map(float, self._global_header_lines[counter].split())
                 counter += 1
-                lo = na.array([xlo, ylo, zlo])
-                hi = na.array([xhi, yhi, zhi])
+                lo = np.array([xlo, ylo, zlo])
+                hi = np.array([xhi, yhi, zhi])
                 dims, start, stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
                                              level, gfn, gfo, dims, start, stop,
@@ -290,7 +283,7 @@
     def read_particle_header(self):
         # We need to get particle offsets and particle counts
         if not self.parameter_file.use_particles:
-            self.pgrid_info = na.zeros((self.num_grids, 3), dtype='int64')
+            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
             return
         self.field_list += nyx_particle_field_names[:]
         header = open(os.path.join(self.parameter_file.path, "DM", "Header"))
@@ -304,7 +297,7 @@
         # Skip over how many grids on each level; this is degenerate
         for i in range(maxlevel + 1):dummy = header.readline()
 
-        grid_info = na.fromiter((int(i) for line in header.readlines()
+        grid_info = np.fromiter((int(i) for line in header.readlines()
                                  for i in line.split()),
                                 dtype='int64',
                                 count=3*self.num_grids).reshape((self.num_grids, 3))
@@ -341,15 +334,15 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self, start_stop):
-        start = na.array(map(int, start_stop[0].split(',')))
-        stop = na.array(map(int, start_stop[1].split(',')))
+        start = np.array(map(int, start_stop[0].split(',')))
+        stop = np.array(map(int, start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension, start, stop
 
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
 
-        self.grids = na.concatenate([level.grids for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
         basedir = self.parameter_file.path
         for g, pg in itertools.izip(self.grids, self.pgrid_info):
             g.particle_filename = os.path.join(basedir, "DM",
@@ -361,9 +354,9 @@
         self.grid_particle_count[:, 0] = self.pgrid_info[:, 1]
         del self.pgrid_info
 
-        gls = na.concatenate([level.ngrids * [level.level] for level in self.levels])
+        gls = np.concatenate([level.ngrids * [level.level] for level in self.levels])
         self.grid_levels[:] = gls.reshape((self.num_grids, 1))
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]]
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]]
                                    for level in self.levels], axis=0)
 
         self.grid_dxs = grid_dcs[:, 0].reshape((self.num_grids, 1))
@@ -378,9 +371,9 @@
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
 
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]  # why the same thing twice?
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -398,7 +391,7 @@
             grid._setup_dx()
 
     def __setup_grid_tree(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         for i, grid in enumerate(self.grids):
             get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
                                 self.grid_left_edge, self.grid_right_edge,
@@ -415,10 +408,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level + 1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level + 1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -444,11 +437,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids, 3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids, 3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids, 3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids, 1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids, 1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids, 3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids, 3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids, 3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids, 1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids, 1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -464,7 +457,7 @@
                             pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -607,9 +600,9 @@
                         self.parameters[param_name] = vals
 
             elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = na.array([float(i) for i in vals])
+                self.domain_right_edge = np.array([float(i) for i in vals])
             elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = na.array([float(i) for i in vals])
+                self.domain_left_edge = np.array([float(i) for i in vals])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals[0])
 


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/nyx/io.py
--- a/yt/frontends/nyx/io.py
+++ b/yt/frontends/nyx/io.py
@@ -27,7 +27,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.lib import read_castro_particles, read_and_seek
 from yt.utilities.io_handler import BaseIOHandler
 
@@ -46,7 +46,7 @@
         offset = grid._particle_offset
         filen = os.path.expanduser(grid.particle_filename)
         off = grid._particle_offset
-        tr = na.zeros(grid.NumberOfParticles, dtype='float64')
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
         read_castro_particles(filen, off,
                               nyx_particle_field_names.index(field),
                               len(nyx_particle_field_names), tr)
@@ -68,7 +68,7 @@
         offset2 = int(nElements*bytesPerReal*field_index)
 
         dtype = grid.hierarchy._dtype
-        field = na.empty(nElements, dtype=grid.hierarchy._dtype)
+        field = np.empty(nElements, dtype=grid.hierarchy._dtype)
         read_and_seek(filen, offset1, offset2, field, nElements * bytesPerReal)
         field = field.reshape(grid.ActiveDimensions, order='F')
 


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -31,7 +31,7 @@
 from string import strip, rstrip
 from stat import ST_CTIME
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
@@ -99,17 +99,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
@@ -139,7 +133,7 @@
         simply add it to the if/elif/else block.
 
         """
-        self.grid_particle_count = na.zeros(len(self.grids))
+        self.grid_particle_count = np.zeros(len(self.grids))
 
         for particle_filename in ["StarParticles", "SinkParticles"]:
             fn = os.path.join(self.pf.fullplotdir, particle_filename)
@@ -160,18 +154,18 @@
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
                 # copied from object_finding_mixin.py
-                mask=na.ones(self.num_grids)
+                mask=np.ones(self.num_grids)
                 for i in xrange(len(coord)):
-                    na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                ind = na.where(mask == 1)
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
                 selected_grids = self.grids[ind]
                 # in orion, particles always live on the finest level.
                 # so, we want to assign the particle to the finest of
                 # the grids we just found
                 if len(selected_grids) != 0:
                     grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                    ind = na.where(self.grids == grid)[0][0]
+                    ind = np.where(self.grids == grid)[0][0]
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
         return True
@@ -211,20 +205,20 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #na.array(map(int,self.__global_header_lines[counter].split()))
+        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #np.array(map(int,self.__global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self.__global_header_lines[counter]
         #domain_re.search(self.__global_header_lines[counter]).groups()
         counter += 1
         self.timestepsPerLevel_unnecessary = self.__global_header_lines[counter]
         counter += 1
-        self.dx = na.zeros((self.n_levels,3))
+        self.dx = np.zeros((self.n_levels,3))
         for i,line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float,line.split()))
+            self.dx[i] = np.array(map(float,line.split()))
         counter += self.n_levels
         self.geometry = int(self.__global_header_lines[counter])
         if self.geometry != 0:
@@ -302,8 +296,8 @@
                 counter+=1
                 zlo,zhi = map(float,self.__global_header_lines[counter].split())
                 counter+=1
-                lo = na.array([xlo,ylo,zlo])
-                hi = na.array([xhi,yhi,zhi])
+                lo = np.array([xlo,ylo,zlo])
+                hi = np.array([xhi,yhi,zhi])
                 dims,start,stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo,hi,grid_counter,level,gfn, gfo, dims,start,stop,paranoia=paranoid_read,hierarchy=self))
                 grid_counter += 1 # this is global, and shouldn't be reset
@@ -347,17 +341,17 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self,start_stop):
-        start = na.array(map(int,start_stop[0].split(',')))
-        stop = na.array(map(int,start_stop[1].split(',')))
+        start = np.array(map(int,start_stop[0].split(',')))
+        stop = np.array(map(int,start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension,start,stop
         
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
-        self.grids = na.concatenate([level.grids for level in self.levels])
-        self.grid_levels = na.concatenate([level.ngrids*[level.level] for level in self.levels])
-        self.grid_levels = na.array(self.grid_levels.reshape((self.num_grids,1)),dtype='int32')
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
+        self.grids = np.concatenate([level.grids for level in self.levels])
+        self.grid_levels = np.concatenate([level.ngrids*[level.level] for level in self.levels])
+        self.grid_levels = np.array(self.grid_levels.reshape((self.num_grids,1)),dtype='int32')
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
         self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
         self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
@@ -368,9 +362,9 @@
             left_edges += [g.LeftEdge for g in level.grids]
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -399,10 +393,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _count_grids(self):
@@ -413,11 +407,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -551,14 +545,14 @@
                 
             elif param.startswith("geometry.prob_hi"):
                 self.domain_right_edge = \
-                    na.array([float(i) for i in vals.split()])
+                    np.array([float(i) for i in vals.split()])
             elif param.startswith("geometry.prob_lo"):
                 self.domain_left_edge = \
-                    na.array([float(i) for i in vals.split()])
+                    np.array([float(i) for i in vals.split()])
 
         self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
         self.dimensionality = self.parameters["TopGridRank"]
-        self.domain_dimensions = na.array(self.parameters["TopGridDimensions"],dtype='int32')
+        self.domain_dimensions = np.array(self.parameters["TopGridDimensions"],dtype='int32')
         self.refine_by = self.parameters["RefineBy"]
 
         if self.parameters.has_key("ComovingCoordinates") and bool(self.parameters["ComovingCoordinates"]):
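
The particle-counting loop above uses np.choose as a masked write to test
containment: the first choose zeroes grids whose left edge lies beyond the
particle coordinate, the second zeroes grids whose right edge does not
reach it, so mask stays 1 only for grids containing the point.  A
standalone sketch with two nested grids:

    import numpy as np

    grid_left_edge  = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
    grid_right_edge = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
    coord = [0.75, 0.75, 0.75]

    mask = np.ones(len(grid_left_edge))
    for i in xrange(len(coord)):
        np.choose(np.greater(grid_left_edge[:, i], coord[i]), (mask, 0), mask)
        np.choose(np.greater(grid_right_edge[:, i], coord[i]), (0, mask), mask)
    ind = np.where(mask == 1)   # both grids contain it; the finest one wins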


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.utilities.physical_constants import \
     mh, kboltz
@@ -146,7 +146,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         else:
             return io._read_particles(data, p_field).astype(dtype)
 


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/orion/io.py
--- a/yt/frontends/orion/io.py
+++ b/yt/frontends/orion/io.py
@@ -25,7 +25,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 
@@ -76,7 +76,7 @@
                     if ( (grid.LeftEdge < coord).all() and 
                          (coord <= grid.RightEdge).all() ):
                         particles.append(read(line, field))
-        return na.array(particles)
+        return np.array(particles)
 
     def _read_data_set(self,grid,field):
         """
@@ -109,8 +109,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int,start.split(',')))
-            stop = na.array(map(int,stop.split(',')))
+            start = np.array(map(int,start.split(',')))
+            stop = np.array(map(int,stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -150,7 +150,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile,count=nElements,dtype=dtype)
+        field = np.fromfile(inFile,count=nElements,dtype=dtype)
         field = field.reshape(grid.ActiveDimensions, order='F')
 
         # we can/should also check against the max and min in the header file


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import stat
 import weakref
 import cStringIO
@@ -38,7 +38,6 @@
 from yt.data_objects.static_output import \
     StaticOutput
 
-from .fields import RAMSESFieldInfo, KnownRAMSESFields
 from .definitions import ramses_header
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
@@ -51,6 +50,7 @@
 import yt.utilities.fortran_utils as fpu
 from yt.geometry.oct_container import \
     RAMSESOctreeContainer
+from .fields import RAMSESFieldInfo, KnownRAMSESFields
 
 class RAMSESDomainFile(object):
     _last_mask = None
@@ -88,9 +88,9 @@
         # It goes: level, CPU, 8-variable
         min_level = self.pf.min_level
         n_levels = self.amr_header['nlevelmax'] - min_level
-        hydro_offset = na.zeros(n_levels, dtype='int64')
+        hydro_offset = np.zeros(n_levels, dtype='int64')
         hydro_offset -= 1
-        level_count = na.zeros(n_levels, dtype='int64')
+        level_count = np.zeros(n_levels, dtype='int64')
         for level in range(self.amr_header['nlevelmax']):
             for cpu in range(self.amr_header['nboundary'] +
                              self.amr_header['ncpu']):
@@ -152,14 +152,14 @@
         for header in ramses_header(hvals):
             hvals.update(fpu.read_attrs(f, header))
         # That's the header, now we skip a few.
-        hvals['numbl'] = na.array(hvals['numbl']).reshape(
+        hvals['numbl'] = np.array(hvals['numbl']).reshape(
             (hvals['nlevelmax'], hvals['ncpu']))
         fpu.skip(f)
         if hvals['nboundary'] > 0:
             fpu.skip(f, 2)
-            self.ngridbound = fpu.read_vector(f, 'i')
+            self.ngridbound = fpu.read_vector(f, 'i').astype("int64")
         else:
-            self.ngridbound = 0
+            self.ngridbound = np.zeros(hvals['nlevelmax'], dtype='int64')
         free_mem = fpu.read_attrs(f, (('free_mem', 5, 'i'), ) )
         ordering = fpu.read_vector(f, 'c')
         fpu.skip(f, 4)
@@ -182,7 +182,7 @@
         f.write(fb.read())
         f.seek(0)
         mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
-            self.domain_id, self.local_oct_count, self.ngridbound)
+            self.domain_id, self.local_oct_count, self.ngridbound.sum())
         def _ng(c, l):
             if c < self.amr_header['ncpu']:
                 ng = self.amr_header['numbl'][l, c]
@@ -192,6 +192,7 @@
             return ng
         min_level = self.pf.min_level
         total = 0
+        nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
         for level in range(self.amr_header['nlevelmax']):
             # Easier if do this 1-indexed
             for cpu in range(self.amr_header['nboundary'] + self.amr_header['ncpu']):
@@ -200,22 +201,22 @@
                 if ng == 0: continue
                 ind = fpu.read_vector(f, "I").astype("int64")
                 fpu.skip(f, 2)
-                pos = na.empty((ng, 3), dtype='float64')
-                pos[:,0] = fpu.read_vector(f, "d")
-                pos[:,1] = fpu.read_vector(f, "d")
-                pos[:,2] = fpu.read_vector(f, "d")
+                pos = np.empty((ng, 3), dtype='float64')
+                pos[:,0] = fpu.read_vector(f, "d") - nx
+                pos[:,1] = fpu.read_vector(f, "d") - ny
+                pos[:,2] = fpu.read_vector(f, "d") - nz
                 #pos *= self.pf.domain_width
                 #pos += self.parameter_file.domain_left_edge
                 fpu.skip(f, 31)
                 #parents = fpu.read_vector(f, "I")
                 #fpu.skip(f, 6)
-                #children = na.empty((ng, 8), dtype='int64')
+                #children = np.empty((ng, 8), dtype='int64')
                 #for i in range(8):
                 #    children[:,i] = fpu.read_vector(f, "I")
-                #cpu_map = na.empty((ng, 8), dtype="int64")
+                #cpu_map = np.empty((ng, 8), dtype="int64")
                 #for i in range(8):
                 #    cpu_map[:,i] = fpu.read_vector(f, "I")
-                #rmap = na.empty((ng, 8), dtype="int64")
+                #rmap = np.empty((ng, 8), dtype="int64")
                 #for i in range(8):
                 #    rmap[:,i] = fpu.read_vector(f, "I")
                 # We don't want duplicate grids.
@@ -251,7 +252,7 @@
             self.domain.pf.max_level, self.domain.domain_id, mask)
         level_counts[1:] = level_counts[:-1]
         level_counts[0] = 0
-        self.level_counts = na.add.accumulate(level_counts)
+        self.level_counts = np.add.accumulate(level_counts)
 
     def icoords(self, dobj):
         return self.oct_handler.icoords(self.domain.domain_id, self.mask,
@@ -266,7 +267,7 @@
     def fwidth(self, dobj):
         # Recall domain_dimensions is the number of cells, not octs
         base_dx = 1.0/self.domain.pf.domain_dimensions
-        widths = na.empty((self.cell_count, 3), dtype="float64")
+        widths = np.empty((self.cell_count, 3), dtype="float64")
         dds = (2**self.ires(dobj))
         for i in range(3):
             widths[:,i] = base_dx[i] / dds
@@ -287,14 +288,14 @@
         filled = pos = level_offset = 0
         min_level = self.domain.pf.min_level
         for field in fields:
-            tr[field] = na.zeros(self.cell_count, 'float64')
+            tr[field] = np.zeros(self.cell_count, 'float64')
         for level, offset in enumerate(self.domain.hydro_offset):
             if offset == -1: continue
             content.seek(offset)
             nc = self.domain.level_count[level]
             temp = {}
             for field in all_fields:
-                temp[field] = na.empty((nc, 8), dtype="float64")
+                temp[field] = np.empty((nc, 8), dtype="float64")
             for i in range(8):
                 for field in all_fields:
                     if field not in fields:
@@ -321,13 +322,14 @@
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.max_level = pf.max_level
 
-        self.float_type = na.float64
+        self.float_type = np.float64
         super(RAMSESGeometryHandler, self).__init__(pf, data_style)
 
     def _initialize_oct_handler(self):
         self.domains = [RAMSESDomainFile(self.parameter_file, i + 1)
                         for i in range(self.parameter_file['ncpu'])]
-        total_octs = sum(dom.local_oct_count for dom in self.domains)
+        total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
+                         for dom in self.domains)
         self.num_grids = total_octs
         #this merely allocates space for the oct tree
         #and nothing else
@@ -337,7 +339,7 @@
             self.parameter_file.domain_right_edge)
         mylog.debug("Allocating %s octs", total_octs)
         self.oct_handler.allocate_domains(
-            [dom.local_oct_count + dom.ngridbound
+            [dom.local_oct_count #+ dom.ngridbound.sum()
              for dom in self.domains])
         #this actually reads every oct and loads it into the octree
         for dom in self.domains:
@@ -461,10 +463,10 @@
                 self.hilbert_indices[int(dom)] = (float(mi), float(ma))
         self.parameters.update(rheader)
         self.current_time = self.parameters['time'] * self.parameters['unit_t']
-        self.domain_left_edge = na.zeros(3, dtype='float64')
-        self.domain_dimensions = na.ones(3, dtype='int32') * \
+        self.domain_left_edge = np.zeros(3, dtype='float64')
+        self.domain_dimensions = np.ones(3, dtype='int32') * \
                         2**(self.min_level+1)
-        self.domain_right_edge = na.ones(3, dtype='float64')
+        self.domain_right_edge = np.ones(3, dtype='float64')
         # This is likely not true, but I am not sure how to otherwise
         # distinguish them.
         mylog.warning("No current mechanism of distinguishing cosmological simulations in RAMSES!")

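The new per-axis offsets computed from amr_header['nx'] recenter the oct
positions read from the AMR files. A quick sketch of the arithmetic, with a
made-up 3-cell coarse grid standing in for the real header values, assuming
nothing beyond numpy:

import numpy as np

# Hypothetical stand-in for self.amr_header['nx']: a coarse grid of
# 3 cells per axis, so the offset (nx - 1)/2 equals 1.0 per axis.
amr_nx = (3, 3, 3)
nx, ny, nz = (((i - 1.0) / 2.0) for i in amr_nx)

raw_pos = np.array([[1.5, 1.5, 1.5]])     # position as read from the file
pos = raw_pos - np.array([nx, ny, nz])    # recentered into the unit domain
print(pos)                                # [[ 0.5  0.5  0.5]]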

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -35,13 +35,12 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 
+RAMSESFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo, "RFI")
+add_field = RAMSESFieldInfo.add_field
 
 KnownRAMSESFields = FieldInfoContainer()
 add_ramses_field = KnownRAMSESFields.add_field
 
-RAMSESFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_field = RAMSESFieldInfo.add_field
-
 known_ramses_fields = [
     "Density",
     "x-velocity",
@@ -56,6 +55,18 @@
         add_ramses_field(f, function=NullFunc, take_log=True,
                   validators = [ValidateDataField(f)])
 
+def dx(field, data):
+    return data.fwidth[:,0]
+add_field("dx", function=dx)
+
+def dy(field, data):
+    return data.fwidth[:,1]
+add_field("dy", function=dy)
+
+def dz(field, data):
+    return data.fwidth[:,2]
+add_field("dz", function=dz)
+
 def _convertDensity(data):
     return data.convert("Density")
 KnownRAMSESFields["Density"]._units = r"\rm{g}/\rm{cm}^3"

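The three new derived fields simply expose the columns of a data object's
fwidth array. A minimal sketch with a stand-in object (FakeData is invented
for illustration):

import numpy as np

class FakeData(object):
    # stands in for a yt data object carrying per-cell widths
    fwidth = np.array([[0.5,  0.5,  0.5],
                       [0.25, 0.25, 0.25]])

def dx(field, data):
    return data.fwidth[:, 0]

print(dx(None, FakeData()))   # [ 0.5   0.25]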

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -24,7 +24,7 @@
 """
 
 from collections import defaultdict
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -39,7 +39,7 @@
         # Chunks in this case will have affiliated domain subset objects
         # Each domain subset will contain a hydro_offset array, which gives
         # pointers to level-by-level hydro information
-        tr = dict((f, na.empty(size, dtype='float64')) for f in fields)
+        tr = dict((f, np.empty(size, dtype='float64')) for f in fields)
         cp = 0
         for chunk in chunks:
             for subset in chunk.objs:
@@ -77,7 +77,7 @@
                 size += mask.sum()
                 masks[id(subset)] = mask
         # Now our second pass
-        tr = dict((f, na.empty(size, dtype="float64")) for f in fields)
+        tr = dict((f, np.empty(size, dtype="float64")) for f in fields)
         for chunk in chunks:
             for subset in chunk.objs:
                 f = open(subset.domain.part_fn, "rb")

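The particle reader above follows a two-pass pattern: a first pass over the
selection masks to size the output, then a single allocation and a fill pass.
Stripped of the file handling (the masks and values here are invented):

import numpy as np

masks = {0: np.array([True, False, True]),
         1: np.array([True, True, False])}
data  = {0: np.arange(0.0, 3.0),
         1: np.arange(3.0, 6.0)}

size = sum(m.sum() for m in masks.values())      # first pass: count
tr = np.empty(size, dtype="float64")             # allocate once
ind = 0
for sid in sorted(masks):                        # second pass: fill
    vals = data[sid][masks[sid]]
    tr[ind:ind + vals.size] = vals
    ind += vals.size
print(tr)                                        # [ 0.  2.  3.  4.]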

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -7,6 +7,7 @@
     config = Configuration('frontends', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
+    config.add_subpackage("athena")
     config.add_subpackage("gdf")
     config.add_subpackage("chombo")
     config.add_subpackage("enzo")


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -24,7 +24,7 @@
 """
 
 import weakref
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import io_registry
 from yt.funcs import *
@@ -40,6 +40,8 @@
     FieldInfoContainer, NullFunc
 from yt.utilities.lib import \
     get_box_grids_level
+from yt.utilities.decompose import \
+    decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 
@@ -71,7 +73,7 @@
         my_ind = self.id - self._id_offset
         le = self.LeftEdge
         self.dds = self.Parent.dds/rf
-        ParentLeftIndex = na.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
+        ParentLeftIndex = np.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
         self.start_index = rf*(ParentLeftIndex + self.Parent.get_global_startindex()).astype('int64')
         self.LeftEdge = self.Parent.LeftEdge + self.Parent.dds * ParentLeftIndex
         self.RightEdge = self.LeftEdge + self.ActiveDimensions*self.dds
@@ -129,31 +131,9 @@
         self.directory = os.getcwd()
         GridGeometryHandler.__init__(self, pf, data_style)
 
-    def _initialize_data_storage(self):
-        pass
-
     def _count_grids(self):
         self.num_grids = self.stream_handler.num_grids
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            # Note that we call add_field on the field_info directly.  This
-            # will allow the same field detection mechanism to work for 1D, 2D
-            # and 3D fields.
-            self.pf.field_info.add_field(
-                    field, lambda a, b: None,
-                    convert_function=cf, take_log=False)
-            
-
     def _parse_hierarchy(self):
         self.grid_dimensions = self.stream_handler.dimensions
         self.grid_left_edge[:] = self.stream_handler.left_edges
@@ -180,7 +160,7 @@
             self._reconstruct_parent_child()
         self.max_level = self.grid_levels.max()
         mylog.debug("Preparing grids")
-        temp_grids = na.empty(self.num_grids, dtype='object')
+        temp_grids = np.empty(self.num_grids, dtype='object')
         for i, grid in enumerate(self.grids):
             if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
             grid.filename = None
@@ -191,7 +171,7 @@
         mylog.debug("Prepared")
 
     def _reconstruct_parent_child(self):
-        mask = na.empty(len(self.grids), dtype='int32')
+        mask = np.empty(len(self.grids), dtype='int32')
         mylog.debug("First pass; identifying child grids")
         for i, grid in enumerate(self.grids):
             get_box_grids_level(self.grid_left_edge[i,:],
@@ -199,7 +179,7 @@
                                 self.grid_levels[i] + 1,
                                 self.grid_left_edge, self.grid_right_edge,
                                 self.grid_levels, mask)
-            ids = na.where(mask.astype("bool"))
+            ids = np.where(mask.astype("bool"))
             grid._children_ids = ids[0] # where is a tuple
         mylog.debug("Second pass; identifying parents")
         for i, grid in enumerate(self.grids): # Second pass
@@ -208,32 +188,14 @@
 
     def _initialize_grid_arrays(self):
         GridGeometryHandler._initialize_grid_arrays(self)
-        self.grid_procs = na.zeros((self.num_grids,1),'int32')
-
-    def save_data(self, *args, **kwargs):
-        pass
-
-    def _detect_fields(self):
-        self.field_list = list(set(self.stream_handler.get_fields()))
-
-    def _setup_derived_fields(self):
-        self.derived_field_list = []
-        for field in self.parameter_file.field_info:
-            try:
-                fd = self.parameter_file.field_info[field].get_dependencies(
-                            pf = self.parameter_file)
-            except:
-                continue
-            available = na.all([f in self.field_list for f in fd.requested])
-            if available: self.derived_field_list.append(field)
-        for field in self.field_list:
-            if field not in self.derived_field_list:
-                self.derived_field_list.append(field)
+        self.grid_procs = np.zeros((self.num_grids,1),'int32')
 
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         GridGeometryHandler._setup_classes(self, dd)
-        self.object_types.sort()
+
+    def _detect_fields(self):
+        self.field_list = list(set(self.stream_handler.get_fields()))
 
     def _populate_grid_objects(self):
         for g in self.grids:
@@ -252,7 +214,7 @@
     _fieldinfo_known = KnownStreamFields
     _data_style = 'stream'
 
-    def __init__(self, stream_handler):
+    def __init__(self, stream_handler, storage_filename = None):
         #if parameter_override is None: parameter_override = {}
         #self._parameter_override = parameter_override
         #if conversion_override is None: conversion_override = {}
@@ -260,6 +222,7 @@
 
         self.stream_handler = stream_handler
         StaticOutput.__init__(self, "InMemoryParameterFile", self._data_style)
+        self.storage_filename = storage_filename
 
         self.units = {}
         self.time_units = {}
@@ -296,8 +259,8 @@
     @property
     def all_fields(self): return self[0].keys()
 
-def load_uniform_grid(data, domain_dimensions, domain_size_in_cm,
-                      sim_time=0.0, number_of_particles=0):
+def load_uniform_grid(data, domain_dimensions, sim_unit_to_cm, bbox=None,
+                      nprocs=1, sim_time=0.0, number_of_particles=0):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -313,55 +276,67 @@
     ----------
     data : dict
         This is a dict of numpy arrays, where the keys are the field names.
-    domain_dimensiosn : array_like
+    domain_dimensions : array_like
         This is the domain dimensions of the grid
-    domain_size_in_cm : float
-        The size of the domain, in centimeters
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like of shape (3, 2), optional
+        Bounding box of the computational domain: one row per dimension,
+        with columns giving the left and right edges in units of sim_unit_to_cm
+    nprocs : integer, optional
+        If greater than 1, decompose the data into this number of subarrays
     sim_time : float, optional
         The simulation time in seconds
     number_of_particles : int, optional
         If particle fields are included, set this to the number of particles
-        
+
     Examples
     --------
 
-    >>> arr = na.random.random((256, 256, 256))
+    >>> arr = np.random.random((128, 128, 129))
     >>> data = dict(Density = arr)
-    >>> pf = load_uniform_grid(data, [256, 256, 256], 3.08e24)
-                
+    >>> bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+    >>> pf = load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
+
     """
+
+    domain_dimensions = np.array(domain_dimensions)
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
     sfh = StreamDictFieldHandler()
-    sfh.update({0:data})
-    domain_dimensions = na.array(domain_dimensions)
-    if na.unique(domain_dimensions).size != 1:
-        print "We don't support variably sized domains yet."
-        raise RuntimeError
-    domain_left_edge = na.zeros(3, 'float64')
-    domain_right_edge = na.ones(3, 'float64')
-    grid_left_edges = na.zeros(3, "int64").reshape((1,3))
-    grid_right_edges = na.array(domain_dimensions, "int64").reshape((1,3))
 
-    grid_levels = na.array([0], dtype='int32').reshape((1,1))
-    grid_dimensions = grid_right_edges - grid_left_edges
-
-    grid_left_edges  = grid_left_edges.astype("float64")
-    grid_left_edges /= domain_dimensions*2**grid_levels
-    grid_left_edges *= domain_right_edge - domain_left_edge
-    grid_left_edges += domain_left_edge
-
-    grid_right_edges  = grid_right_edges.astype("float64")
-    grid_right_edges /= domain_dimensions*2**grid_levels
-    grid_right_edges *= domain_right_edge - domain_left_edge
-    grid_right_edges += domain_left_edge
+    if nprocs > 1:
+        temp = {}
+        new_data = {}
+        for key in data.keys():
+            psize = get_psize(np.array(data[key].shape), nprocs)
+            grid_left_edges, grid_right_edges, temp[key] = \
+                decompose_array(data[key], psize, bbox)
+            grid_dimensions = np.array([grid.shape for grid in temp[key]],
+                                       dtype="int32")
+        for gid in range(nprocs):
+            new_data[gid] = {}
+            for key in temp.keys():
+                new_data[gid].update({key:temp[key][gid]})
+        sfh.update(new_data)
+        del new_data, temp
+    else:
+        sfh.update({0:data})
+        grid_left_edges = domain_left_edge
+        grid_right_edges = domain_right_edge
+        grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
 
     handler = StreamHandler(
         grid_left_edges,
         grid_right_edges,
         grid_dimensions,
         grid_levels,
-        na.array([-1], dtype='int64'),
-        number_of_particles*na.ones(1, dtype='int64').reshape((1,1)),
-        na.zeros(1).reshape((1,1)),
+        -np.ones(nprocs, dtype='int64'),
+        number_of_particles*np.ones(nprocs, dtype='int64').reshape(nprocs,1),
+        np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
     )
 
@@ -375,10 +350,10 @@
     handler.cosmology_simulation = 0
 
     spf = StreamStaticOutput(handler)
-    spf.units["cm"] = domain_size_in_cm
+    spf.units["cm"] = sim_unit_to_cm
     spf.units['1'] = 1.0
     spf.units["unitary"] = 1.0
-    box_in_mpc = domain_size_in_cm / mpc_conversion['cm']
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf

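When nprocs > 1, load_uniform_grid now regroups the decomposed subarrays into
one field dict per grid. The reshuffling is plain dict bookkeeping; here is a
sketch where the split arrays are hand-built stand-ins for what
get_psize/decompose_array would return:

import numpy as np

arr = np.arange(64.0).reshape((4, 4, 4))
temp = {"Density": [arr[:2, :, :], arr[2:, :, :]]}   # pretend decomposition
nprocs = 2

new_data = {}
for gid in range(nprocs):
    new_data[gid] = {}
    for key in temp.keys():
        new_data[gid].update({key: temp[key][gid]})

print(new_data[1]["Density"].shape)   # (2, 4, 4)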

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -27,6 +27,7 @@
 
 import exceptions
 import os
+import numpy as np
 
 from yt.utilities.io_handler import \
     BaseIOHandler, _axis_ids
@@ -51,19 +52,29 @@
         # New in-place unit conversion breaks if we don't copy first
         return tr
 
-    def modify(self, field):
-        return field
-
-    def _read_field_names(self, grid):
-        return self.fields[grid.id].keys()
-
-    def _read_data_slice(self, grid, field, axis, coord):
-        sl = [slice(None), slice(None), slice(None)]
-        sl[axis] = slice(coord, coord + 1)
-        sl = tuple(reversed(sl))
-        tr = self.fields[grid.id][field][sl].swapaxes(0,2)
-        # In-place unit conversion requires we return a copy
-        return tr.copy()
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks)
+        if any((ftype != "gas" for ftype, fname in fields)):
+            raise NotImplementedError
+        rv = {}
+        for field in fields:
+            ftype, fname = field
+            rv[field] = np.empty(size, dtype="float64")
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s blocks",
+                    size, [f2 for f1, f2 in fields], ng)
+        for field in fields:
+            ftype, fname = field
+            ind = 0
+            for chunk in chunks:
+                for g in chunk.objs:
+                    mask = g.select(selector) # caches
+                    if mask is None: continue
+                    ds = self.fields[g.id][fname]
+                    data = ds[mask]
+                    rv[field][ind:ind+data.size] = data
+                    ind += data.size
+        return rv
 
     @property
     def _read_exception(self):

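The new _read_fluid_selection fills one flat, preallocated array per field by
applying each grid's selection mask in turn. The indexing pattern, with
invented grids and masks standing in for chunk objects and selectors:

import numpy as np

grids = {1: np.arange(8.0).reshape((2, 2, 2)),
         2: np.arange(8.0, 16.0).reshape((2, 2, 2))}
masks = {1: grids[1] > 5.0,        # pretend selector results
         2: grids[2] < 10.0}

size = sum(m.sum() for m in masks.values())
rv = np.empty(size, dtype="float64")
ind = 0
for gid in sorted(grids):
    data = grids[gid][masks[gid]]  # boolean mask flattens the 3D grid
    rv[ind:ind + data.size] = data
    ind += data.size
print(rv)                          # [ 6.  7.  8.  9.]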

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/tiger/data_structures.py
--- a/yt/frontends/tiger/data_structures.py
+++ b/yt/frontends/tiger/data_structures.py
@@ -44,15 +44,15 @@
         self.RightEdge = right_edge
         self.Level = 0
         self.NumberOfParticles = 0
-        self.left_dims = na.array(left_dims, dtype='int32')
-        self.right_dims = na.array(right_dims, dtype='int32')
+        self.left_dims = np.array(left_dims, dtype='int32')
+        self.right_dims = np.array(right_dims, dtype='int32')
         self.ActiveDimensions = self.right_dims - self.left_dims
         self.Parent = None
         self.Children = []
 
     @property
     def child_mask(self):
-        return na.ones(self.ActiveDimensions, dtype='int32')
+        return np.ones(self.ActiveDimensions, dtype='int32')
 
     def __repr__(self):
         return "TigerGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
@@ -70,7 +70,7 @@
         # Tiger is unigrid
         self.ngdims = [i/j for i,j in
                 izip(self.pf.root_size, self.pf.max_grid_size)]
-        self.num_grids = na.prod(self.ngdims)
+        self.num_grids = np.prod(self.ngdims)
         self.max_level = 0
 
     def _setup_classes(self):
@@ -87,18 +87,18 @@
         DW = DRE - DLE
         gds = DW / self.ngdims
         rd = [self.pf.root_size[i]-self.pf.max_grid_size[i] for i in range(3)]
-        glx, gly, glz = na.mgrid[DLE[0]:DRE[0]-gds[0]:self.ngdims[0]*1j,
+        glx, gly, glz = np.mgrid[DLE[0]:DRE[0]-gds[0]:self.ngdims[0]*1j,
                                  DLE[1]:DRE[1]-gds[1]:self.ngdims[1]*1j,
                                  DLE[2]:DRE[2]-gds[2]:self.ngdims[2]*1j]
-        gdx, gdy, gdz = na.mgrid[0:rd[0]:self.ngdims[0]*1j,
+        gdx, gdy, gdz = np.mgrid[0:rd[0]:self.ngdims[0]*1j,
                                  0:rd[1]:self.ngdims[1]*1j,
                                  0:rd[2]:self.ngdims[2]*1j]
         LE, RE, levels, counts = [], [], [], []
         i = 0
         for glei, gldi in izip(izip(glx.flat, gly.flat, glz.flat),
                                izip(gdx.flat, gdy.flat, gdz.flat)):
-            gld = na.array(gldi)
-            gle = na.array(glei)
+            gld = np.array(gldi)
+            gle = np.array(glei)
             gre = gle + gds
             g = self.grid(i, self, gle, gre, gld, gld+self.pf.max_grid_size)
             grids.append(g)
@@ -108,13 +108,13 @@
             levels.append(g.Level)
             counts.append(g.NumberOfParticles)
             i += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
-        self.grid_dimensions[:] = na.array(dims, dtype='int64')
-        self.grid_left_edge[:] = na.array(LE, dtype='float64')
-        self.grid_right_edge[:] = na.array(RE, dtype='float64')
-        self.grid_levels.flat[:] = na.array(levels, dtype='int32')
-        self.grid_particle_count.flat[:] = na.array(counts, dtype='int32')
+        self.grid_dimensions[:] = np.array(dims, dtype='int64')
+        self.grid_left_edge[:] = np.array(LE, dtype='float64')
+        self.grid_right_edge[:] = np.array(RE, dtype='float64')
+        self.grid_levels.flat[:] = np.array(levels, dtype='int32')
+        self.grid_particle_count.flat[:] = np.array(counts, dtype='int32')
 
     def _populate_grid_objects(self):
         # We don't need to do anything here
@@ -186,8 +186,8 @@
         self.parameters['RefineBy'] = 2
 
     def _set_units(self):
-        self.domain_left_edge = na.zeros(3, dtype='float64')
-        self.domain_right_edge = na.ones(3, dtype='float64')
+        self.domain_left_edge = np.zeros(3, dtype='float64')
+        self.domain_right_edge = np.ones(3, dtype='float64')
         self.units = {}
         self.time_units = {}
         self.time_units['1'] = 1

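The np.mgrid calls in the Tiger hierarchy rely on numpy's complex-step
convention: a step of N*1j means N evenly spaced samples with the endpoint
included. A quick check:

import numpy as np

print(np.mgrid[0.0:1.0:5j])        # [ 0.    0.25  0.5   0.75  1.  ]

# The 3D form returns one coordinate array per axis:
glx, gly, glz = np.mgrid[0:1:2j, 0:1:2j, 0:1:2j]
print(glx.shape)                   # (2, 2, 2)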

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/frontends/tiger/io.py
--- a/yt/frontends/tiger/io.py
+++ b/yt/frontends/tiger/io.py
@@ -36,17 +36,17 @@
 
     def _read_data_set(self, grid, field):
         fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
-        LD = na.array(grid.left_dims, dtype='int64')
-        SS = na.array(grid.ActiveDimensions, dtype='int64')
-        RS = na.array(grid.pf.root_size, dtype='int64')
+        LD = np.array(grid.left_dims, dtype='int64')
+        SS = np.array(grid.ActiveDimensions, dtype='int64')
+        RS = np.array(grid.pf.root_size, dtype='int64')
         data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")
         return data
 
     def _read_data_slice(self, grid, field, axis, coord):
         fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
-        LD = na.array(grid.left_dims, dtype='int64').copy()
-        SS = na.array(grid.ActiveDimensions, dtype='int64').copy()
-        RS = na.array(grid.pf.root_size, dtype='int64').copy()
+        LD = np.array(grid.left_dims, dtype='int64').copy()
+        SS = np.array(grid.ActiveDimensions, dtype='int64').copy()
+        RS = np.array(grid.pf.root_size, dtype='int64').copy()
         LD[axis] += coord
         SS[axis] = 1
         data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -310,7 +310,8 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       ytcfg.getboolean("yt", "ipython_notebook"):
+       ytcfg.getboolean("yt", "ipython_notebook") or \
+       ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):
         from yt.gui.reason.extdirect_repl import ExtProgressBar

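With this change get_pbar also returns a DummyProgressBar when the internal
__withintesting flag is set. Assuming the usual ytcfg mapping interface, a
test harness would flip it like so:

from yt.config import ytcfg
# Suppress progress bars while running under a test harness:
ytcfg["yt", "__withintesting"] = "True"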

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -1,4 +1,4 @@
-"""
+""" 
 Geometry container base class.
 
 Author: Matthew Turk <matthewturk at gmail.com>
@@ -29,7 +29,7 @@
 import h5py
 from exceptions import IOError, TypeError
 from types import ClassType
-import numpy as na
+import numpy as np
 import abc
 
 from yt.funcs import *
@@ -141,7 +141,7 @@
                 self.parameter_file.field_dependencies[field] = fd
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -324,7 +324,7 @@
             ftype, fname = field
             finfo = dobj._get_field_info(*field)
             conv_factor = finfo._convert_function(self)
-            na.multiply(fields_to_return[field], conv_factor,
+            np.multiply(fields_to_return[field], conv_factor,
                         fields_to_return[field])
         return fields_to_return, fields_to_generate
 
@@ -352,7 +352,7 @@
         for field in fields_to_read:
             ftype, fname = field
             conv_factor = self.pf.field_info[fname]._convert_function(self)
-            na.multiply(fields_to_return[field], conv_factor,
+            np.multiply(fields_to_return[field], conv_factor,
                         fields_to_return[field])
         #mylog.debug("Don't know how to read %s", fields_to_generate)
         return fields_to_return, fields_to_generate
@@ -391,7 +391,7 @@
     @property
     def fcoords(self):
         if self._fcoords is not None: return self._fcoords
-        ci = na.empty((self.data_size, 3), dtype='float64')
+        ci = np.empty((self.data_size, 3), dtype='float64')
         self._fcoords = ci
         if self.data_size == 0: return self._fcoords
         ind = 0
@@ -406,7 +406,7 @@
     @property
     def icoords(self):
         if self._icoords is not None: return self._icoords
-        ci = na.empty((self.data_size, 3), dtype='int64')
+        ci = np.empty((self.data_size, 3), dtype='int64')
         self._icoords = ci
         if self.data_size == 0: return self._icoords
         ind = 0
@@ -421,7 +421,7 @@
     @property
     def fwidth(self):
         if self._fwidth is not None: return self._fwidth
-        ci = na.empty((self.data_size, 3), dtype='float64')
+        ci = np.empty((self.data_size, 3), dtype='float64')
         self._fwidth = ci
         if self.data_size == 0: return self._fwidth
         ind = 0
@@ -436,7 +436,7 @@
     @property
     def ires(self):
         if self._ires is not None: return self._ires
-        ci = na.empty(self.data_size, dtype='int64')
+        ci = np.empty(self.data_size, dtype='int64')
         self._ires = ci
         if self.data_size == 0: return self._ires
         ind = 0

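The four chunk properties above share one lazy-allocation pattern: return the
cached array if present, otherwise allocate it once, record it, and fill it
chunk by chunk. Reduced to a skeleton (ChunkCache and the elided fill step are
illustrative):

import numpy as np

class ChunkCache(object):
    def __init__(self, data_size):
        self.data_size = data_size
        self._fcoords = None

    @property
    def fcoords(self):
        if self._fcoords is not None: return self._fcoords
        ci = np.empty((self.data_size, 3), dtype='float64')
        self._fcoords = ci
        if self.data_size == 0: return self._fcoords
        # ...fill ci here, chunk by chunk...
        return self._fcoords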

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 import string, re, gc, time, cPickle
 import weakref
 
@@ -79,11 +79,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def clear_all_data(self):
         """
@@ -111,13 +111,13 @@
         self.level_stats['numgrids'] = [0 for i in range(MAXLEVEL)]
         self.level_stats['numcells'] = [0 for i in range(MAXLEVEL)]
         for level in xrange(self.max_level+1):
-            self.level_stats[level]['numgrids'] = na.sum(self.grid_levels == level)
+            self.level_stats[level]['numgrids'] = np.sum(self.grid_levels == level)
             li = (self.grid_levels[:,0] == level)
             self.level_stats[level]['numcells'] = self.grid_dimensions[li,:].prod(axis=1).sum()
 
     @property
     def grid_corners(self):
-        return na.array([
+        return np.array([
           [self.grid_left_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
           [self.grid_right_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
           [self.grid_right_edge[:,0], self.grid_right_edge[:,1], self.grid_left_edge[:,2]],
@@ -169,7 +169,7 @@
             gi = dobj.selector.select_grids(self.grid_left_edge,
                                             self.grid_right_edge)
             grids = list(sorted(self.grids[gi], key = lambda g: g.filename))
-            dobj._chunk_info = na.array(grids, dtype='object')
+            dobj._chunk_info = np.array(grids, dtype='object')
         if getattr(dobj, "size", None) is None:
             dobj.size = self._count_selection(dobj)
             dobj.shape = (dobj.size,)


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/geometry/object_finding_mixin.py
--- a/yt/geometry/object_finding_mixin.py
+++ b/yt/geometry/object_finding_mixin.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.lib import \
@@ -38,15 +38,15 @@
         along *axis*
         """
         # Let's figure out which grids are on the slice
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         # So if gRE > coord, we get a mask; if not, we get a zero
         #    if gLE > coord, we get a zero; if not, the mask
         # Thus, if the coordinate is between the two edges, we win!
-        na.choose(na.greater(self.grid_right_edge[:,x_dict[axis]],coord[0]),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,x_dict[axis]],coord[0]),(mask,0),mask)
-        na.choose(na.greater(self.grid_right_edge[:,y_dict[axis]],coord[1]),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,y_dict[axis]],coord[1]),(mask,0),mask)
-        ind = na.where(mask == 1)
+        np.choose(np.greater(self.grid_right_edge[:,x_dict[axis]],coord[0]),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,x_dict[axis]],coord[0]),(mask,0),mask)
+        np.choose(np.greater(self.grid_right_edge[:,y_dict[axis]],coord[1]),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,y_dict[axis]],coord[1]),(mask,0),mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_max(self, field, finest_levels = 3):
@@ -73,13 +73,13 @@
               max_val, mx, my, mz)
         self.parameters["Max%sValue" % (field)] = max_val
         self.parameters["Max%sPos" % (field)] = "%s" % ((mx,my,mz),)
-        return max_val, na.array((mx,my,mz), dtype='float64')
+        return max_val, np.array((mx,my,mz), dtype='float64')
 
     def find_min(self, field):
         """
         Returns (value, center) of location of minimum for a given field
         """
-        gI = na.where(self.grid_levels >= 0) # Slow but pedantic
+        gI = np.where(self.grid_levels >= 0) # Slow but pedantic
         minVal = 1e100
         for grid in self.grids[gI[0]]:
             mylog.debug("Checking %s (level %s)", grid.id, grid.Level)
@@ -88,7 +88,7 @@
                 minCoord = coord
                 minVal = val
                 minGrid = grid
-        mc = na.array(minCoord)
+        mc = np.array(minCoord)
         pos=minGrid.get_position(mc)
         mylog.info("Min Value is %0.5e at %0.16f %0.16f %0.16f in grid %s at level %s", \
               minVal, pos[0], pos[1], pos[2], minGrid, minGrid.Level)
@@ -101,11 +101,11 @@
         """
         Returns the (objects, indices) of grids containing an (x,y,z) point
         """
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         for i in xrange(len(coord)):
-            na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-            na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-        ind = na.where(mask == 1)
+            np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+            np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_field_value_at_point(self, fields, coord):
@@ -132,7 +132,7 @@
         # Get the most-refined grid at this coordinate.
         this = self.find_point(coord)[0][-1]
         cellwidth = (this.RightEdge - this.LeftEdge) / this.ActiveDimensions
-        mark = na.zeros(3).astype('int')
+        mark = np.zeros(3).astype('int')
         # Find the index for the cell containing this point.
         for dim in xrange(len(coord)):
             mark[dim] = int((coord[dim] - this.LeftEdge[dim]) / cellwidth[dim])
@@ -149,15 +149,15 @@
         *axis*
         """
         # Let's figure out which grids are on the slice
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         # So if gRE > coord, we get a mask; if not, we get a zero
         #    if gLE > coord, we get a zero; if not, the mask
         # Thus, if the coordinate is between the edges, we win!
-        #ind = na.where( na.logical_and(self.grid_right_edge[:,axis] > coord, \
+        #ind = np.where( np.logical_and(self.grid_right_edge[:,axis] > coord, \
                                        #self.grid_left_edge[:,axis] < coord))
-        na.choose(na.greater(self.grid_right_edge[:,axis],coord),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,axis],coord),(mask,0),mask)
-        ind = na.where(mask == 1)
+        np.choose(np.greater(self.grid_right_edge[:,axis],coord),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,axis],coord),(mask,0),mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_sphere_grids(self, center, radius):
@@ -165,29 +165,29 @@
         Returns objects, indices of grids within a sphere
         """
         centers = (self.grid_right_edge + self.grid_left_edge)/2.0
-        long_axis = na.maximum.reduce(self.grid_right_edge - self.grid_left_edge, 1)
-        t = na.abs(centers - center)
+        long_axis = np.maximum.reduce(self.grid_right_edge - self.grid_left_edge, 1)
+        t = np.abs(centers - center)
         DW = self.parameter_file.domain_right_edge \
            - self.parameter_file.domain_left_edge
-        na.minimum(t, na.abs(DW-t), t)
-        dist = na.sqrt(na.sum((t**2.0), axis=1))
-        gridI = na.where(dist < (radius + long_axis))
+        np.minimum(t, np.abs(DW-t), t)
+        dist = np.sqrt(np.sum((t**2.0), axis=1))
+        gridI = np.where(dist < (radius + long_axis))
         return self.grids[gridI], gridI
 
     def get_box_grids(self, left_edge, right_edge):
         """
         Gets back all the grids between a left edge and right edge
         """
-        grid_i = na.where((na.all(self.grid_right_edge > left_edge, axis=1)
-                         & na.all(self.grid_left_edge < right_edge, axis=1)) == True)
+        grid_i = np.where((np.all(self.grid_right_edge > left_edge, axis=1)
+                         & np.all(self.grid_left_edge < right_edge, axis=1)) == True)
         return self.grids[grid_i], grid_i
 
     def get_periodic_box_grids(self, left_edge, right_edge):
-        mask = na.zeros(self.grids.shape, dtype='bool')
+        mask = np.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
-        left_edge = na.array(left_edge)
-        right_edge = na.array(right_edge)
+        left_edge = np.array(left_edge)
+        right_edge = np.array(right_edge)
         dw = dr - dl
         left_dist = left_edge - dl
         db = right_edge - left_edge
@@ -201,26 +201,26 @@
                     nre = nle + db
                     g, gi = self.get_box_grids(nle, nre)
                     mask[gi] = True
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
 
     def get_box_grids_below_level(self, left_edge, right_edge, level,
                                   min_level = 0):
         # We discard grids if they are ABOVE the level
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         get_box_grids_below_level(left_edge, right_edge,
                             level,
                             self.grid_left_edge, self.grid_right_edge,
                             self.grid_levels.astype("int32"), mask, min_level)
         mask = mask.astype("bool")
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
 
     def get_periodic_box_grids_below_level(self, left_edge, right_edge, level,
                                            min_level = 0):
-        mask = na.zeros(self.grids.shape, dtype='bool')
+        mask = np.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
-        left_edge = na.array(left_edge)
-        right_edge = na.array(right_edge)
+        left_edge = np.array(left_edge)
+        right_edge = np.array(right_edge)
         dw = dr - dl
         left_dist = left_edge - dl
         db = right_edge - left_edge
@@ -235,5 +235,5 @@
                     g, gi = self.get_box_grids_below_level(nle, nre,
                                             level, min_level)
                     mask[gi] = True
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
 

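The np.choose pairs above AND two interval tests into the mask in place: a
grid survives only if its left edge is at or below the coordinate and its
right edge above it. The same selection in plain boolean form (the edges here
are invented):

import numpy as np

grid_left_edge  = np.array([[0.0], [0.5]])
grid_right_edge = np.array([[0.5], [1.0]])
coord = 0.25

mask = np.ones(2)
np.choose(np.greater(grid_right_edge[:, 0], coord), (0, mask), mask)
np.choose(np.greater(grid_left_edge[:, 0], coord), (mask, 0), mask)

bmask = ((grid_left_edge[:, 0] <= coord) &
         (grid_right_edge[:, 0] > coord))
print(np.where(mask == 1))   # (array([0]),)
print(np.where(bmask))       # (array([0]),)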

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -53,6 +53,9 @@
     cdef np.float64_t DLE[3], DRE[3]
     cdef public int nocts
     cdef public int max_domain
+    cdef Oct* get(self, ppos)
+    cdef void neighbors(self, Oct *, Oct **)
+    cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
 
 cdef class RAMSESOctreeContainer(OctreeContainer):
     cdef OctAllocationContainer **domains


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -127,6 +127,39 @@
                 yield (this.ind, this.local_ind, this.domain)
             cur = cur.next
 
+    cdef void oct_bounds(self, Oct *o, np.float64_t *corner, np.float64_t *size):
+        cdef np.float64_t base_dx[3]
+        cdef int i
+        for i in range(3):
+            size[i] = (self.DRE[i] - self.DLE[i]) / (self.nn[i] << o.level)
+            corner[i] = o.pos[i] * size[i] + self.DLE[i]
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef Oct *get(self, ppos):
+        cdef np.int64_t ind[3]
+        cdef np.float64_t dds[3], cp[3], pp[3]
+        cdef Oct *cur
+        cdef int i
+        for i in range(3):
+            pp[i] = ppos[i] - self.DLE[i]
+            dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
+            ind[i] = <np.int64_t> (pp[i]/dds[i])
+            cp[i] = (ind[i] + 0.5) * dds[i]
+        cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
+        while cur.children[0][0][0] != NULL:
+            for i in range(3):
+                dds[i] = dds[i] / 2.0
+                if cp[i] > pp[i]:
+                    ind[i] = 0
+                    cp[i] -= dds[i] / 2.0
+                else:
+                    ind[i] = 1
+                    cp[i] += dds[i]/2.0
+            cur = cur.children[ind[0]][ind[1]][ind[2]]
+        return cur
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -154,6 +187,93 @@
                 count[o.domain - 1] += mask[oi,i]
         return count
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void neighbors(self, Oct* o, Oct* neighbors[27]):
+        cdef np.int64_t curopos[3]
+        cdef np.int64_t curnpos[3]
+        cdef np.int64_t npos[3]
+        cdef int i, j, k, ni, nj, nk, ind[3], nn, dl, skip
+        cdef np.float64_t dds[3], cp[3], pp[3]
+        cdef Oct* candidate
+        for i in range(27): neighbors[i] = NULL
+        nn = 0
+        for ni in range(3):
+            for nj in range(3):
+                for nk in range(3):
+                    #print "Handling neighbor", nn
+                    if ni == nj == nk == 1:
+                        neighbors[nn] = o
+                        nn += 1
+                        continue
+                    npos[0] = o.pos[0] + (ni - 1)
+                    npos[1] = o.pos[1] + (nj - 1)
+                    npos[2] = o.pos[2] + (nk - 1)
+                    for i in range(3):
+                        # Periodicity
+                        if npos[i] == -1:
+                            npos[i] = (self.nn[i]  << o.level) - 1
+                        elif npos[i] == (self.nn[i] << o.level):
+                            npos[i] = 0
+                        curopos[i] = o.pos[i]
+                        curnpos[i] = npos[i] 
+                    # Now we have our neighbor position and a safe place to
+                    # keep it.  curnpos will be the root index of the neighbor
+                    # at a given level, and npos will be constant.  curopos is
+                    # the candidate root at a level.
+                    candidate = o
+                    while candidate != NULL:
+                        if ((curopos[0] == curnpos[0]) and 
+                            (curopos[1] == curnpos[1]) and
+                            (curopos[2] == curnpos[2])):
+                            break
+                        # This one doesn't meet it, so we pop up a level.
+                        # First we update our positions, then we update our
+                        # candidate.
+                        for i in range(3):
+                            # We strip a digit off the right
+                            curopos[i] = (curopos[i] >> 1)
+                            curnpos[i] = (curnpos[i] >> 1)
+                        # Now we update to the candidate's parent, which should
+                        # have a matching position to curopos[]
+                        candidate = candidate.parent
+                    if candidate == NULL:
+                        # Worst case scenario
+                        for i in range(3):
+                            ind[i] = (npos[i] >> (o.level))
+                        #print "NULL", npos[0], npos[1], npos[2], ind[0], ind[1], ind[2], o.level
+                        candidate = self.root_mesh[ind[0]][ind[1]][ind[2]]
+                    # Now we have the common root, which may be NULL
+                    while candidate.level < o.level:
+                        dl = o.level - (candidate.level + 1)
+                        for i in range(3):
+                            ind[i] = (npos[i] >> dl) & 1
+                        if candidate.children[0][0][0] == NULL: break
+                        candidate = candidate.children[ind[0]][ind[1]][ind[2]]
+                    neighbors[nn] = candidate
+                    nn += 1
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def get_neighbor_boundaries(self, ppos):
+        cdef Oct *main = self.get(ppos)
+        cdef Oct* neighbors[27]
+        self.neighbors(main, neighbors)
+        cdef np.ndarray[np.float64_t, ndim=2] bounds
+        cdef np.float64_t corner[3], size[3]
+        bounds = np.zeros((27,6), dtype="float64")
+        cdef int i, ii
+        cdef np.float64_t base_dx[3]
+        tnp = 0
+        for i in range(27):
+            self.oct_bounds(neighbors[i], corner, size)
+            for ii in range(3):
+                bounds[i, ii] = corner[ii]
+                bounds[i, 3+ii] = size[ii]
+        return bounds
+
 cdef class RAMSESOctreeContainer(OctreeContainer):
 
     def allocate_domains(self, domain_counts):
@@ -215,7 +335,7 @@
     @cython.cdivision(True)
     def add(self, int curdom, int curlevel, int ng,
             np.ndarray[np.float64_t, ndim=2] pos,
-            int local_domain):
+            int local_domain, int skip_boundary = 1):
         cdef int level, no, p, i, j, k, ind[3]
         cdef int local = (local_domain == curdom)
         cdef Oct *cur, *next = NULL
@@ -224,15 +344,20 @@
         if curdom > self.max_domain: curdom = local_domain
         cdef OctAllocationContainer *cont = self.domains[curdom - 1]
         cdef int initial = cont.n_assigned
+        cdef int in_boundary = 0
         # How do we bootstrap ourselves?
         for p in range(no):
             # for every oct we're trying to add, find the
             # floating-point unitary position on this level
+            in_boundary = 0
             for i in range(3):
                 pp[i] = pos[p, i]
                 dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
                 ind[i] = <np.int64_t> (pp[i]/dds[i])
                 cp[i] = (ind[i] + 0.5) * dds[i]
+                if ind[i] < 0 or ind[i] >= self.nn[i]:
+                    in_boundary = 1
+            if skip_boundary == in_boundary == 1: continue
             cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
             if cur == NULL:
                 if curlevel != 0:
@@ -422,6 +547,20 @@
     cdef ParticleArrays *last_sd
     cdef Oct** oct_list
 
+    def allocate_root(self):
+        cdef int i, j, k
+        cdef Oct *cur
+        for i in range(self.nn[0]):
+            for j in range(self.nn[1]):
+                for k in range(self.nn[2]):
+                    cur = self.allocate_oct()
+                    cur.level = 0
+                    cur.pos[0] = i
+                    cur.pos[1] = j
+                    cur.pos[2] = k
+                    cur.parent = NULL
+                    self.root_mesh[i][j][k] = cur
+
     def __dealloc__(self):
         cdef int i, j, k
         for i in range(self.nn[0]):
@@ -444,8 +583,89 @@
             free(o.sd.domain_id)
         free(o)
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def icoords(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                np.int64_t cell_count):
+        cdef np.ndarray[np.int64_t, ndim=2] coords
+        coords = np.empty((cell_count, 3), dtype="int64")
+        cdef int oi, i, ci, ii
+        ci = 0
+        for oi in range(self.nocts):
+            o = self.oct_list[oi]
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        ii = ((k*2)+j)*2+i
+                        if mask[oi, ii] == 1:
+                            coords[ci, 0] = (o.pos[0] << 1) + i
+                            coords[ci, 1] = (o.pos[1] << 1) + j
+                            coords[ci, 2] = (o.pos[2] << 1) + k
+                            ci += 1
+        return coords
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def ires(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                np.int64_t cell_count):
+        cdef np.ndarray[np.int64_t, ndim=1] res
+        res = np.empty(cell_count, dtype="int64")
+        cdef int oi, i, ci
+        ci = 0
+        for oi in range(self.nocts):
+            o = self.oct_list[oi]
+            for i in range(8):
+                if mask[oi, i] == 1:
+                    res[ci] = o.level
+                    ci += 1
+        return res
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def fcoords(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                np.int64_t cell_count):
+        cdef np.ndarray[np.float64_t, ndim=2] coords
+        coords = np.empty((cell_count, 3), dtype="float64")
+        cdef int oi, i, ci
+        cdef np.float64_t base_dx[3], dx[3], pos[3]
+        for i in range(3):
+            # This is the base_dx, but not the base distance from the center
+            # position.  Note that the positions will also all be offset by
+            # dx/2.0.  This is also for *oct grids*, not cells.
+            base_dx[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
+        ci = 0
+        cdef int proc
+        for oi in range(self.nocts):
+            proc = 0
+            for i in range(8):
+                if mask[oi, i] == 1:
+                    proc = 1
+                    break
+            if proc == 0: continue
+            o = self.oct_list[oi]
+            for i in range(3):
+                # This gives the *grid* width for this level
+                dx[i] = base_dx[i] / (1 << o.level)
+                # o.pos is the *grid* index, so pos[i] is the center of the
+                # first cell in the grid
+                pos[i] = self.DLE[i] + o.pos[i]*dx[i] + dx[i]/4.0
+                dx[i] = dx[i] / 2.0 # This is now the *offset* 
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        ii = ((k*2)+j)*2+i
+                        if mask[oi, ii] == 0: continue
+                        coords[ci, 0] = pos[0] + dx[0] * i
+                        coords[ci, 1] = pos[1] + dx[1] * j
+                        coords[ci, 2] = pos[2] + dx[2] * k
+                        ci += 1
+        return coords
+
     def allocate_domains(self, domain_counts):
-        cdef int count, i
+        pass
 
     def finalize(self):
         self.oct_list = <Oct**> malloc(sizeof(Oct*)*self.nocts)
@@ -509,6 +729,7 @@
         cdef np.float64_t dds[3], cp[3], pp[3]
         cdef int ind[3]
         self.max_domain = max(self.max_domain, domain_id)
+        if self.root_mesh[0][0][0] == NULL: self.allocate_root()
         for p in range(no):
             level = 0
             for i in range(3):
@@ -518,10 +739,7 @@
                 cp[i] = (ind[i] + 0.5) * dds[i]
             cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
             if cur == NULL:
-                cur = self.allocate_oct()
-                self.root_mesh[ind[0]][ind[1]][ind[2]] = cur
-                for i in range(3):
-                    cur.pos[i] = ind[i] # root level
+                raise RuntimeError
             if cur.sd.np == 32:
                 self.refine_oct(cur, cp)
             while cur.sd.np < 0:
@@ -539,6 +757,7 @@
                     self.refine_oct(cur, cp)
             # Now we copy in our particle 
             pi = cur.sd.np
+            cur.level = level
             for i in range(3):
                 cur.sd.pos[i][pi] = pp[i]
             cur.sd.domain_id[pi] = domain_id
@@ -551,9 +770,11 @@
             for j in range(2):
                 for k in range(2):
                     noct = self.allocate_oct()
-                    noct.pos[0] = o.pos[0] << 1 + i
-                    noct.pos[1] = o.pos[1] << 1 + j
-                    noct.pos[2] = o.pos[2] << 1 + k
+                    noct.level = o.level + 1
+                    noct.pos[0] = (o.pos[0] << 1) + i
+                    noct.pos[1] = (o.pos[1] << 1) + j
+                    noct.pos[2] = (o.pos[2] << 1) + k
+                    noct.parent = o
                     o.children[i][j][k] = noct
         for m in range(32):
             for i in range(3):
@@ -615,3 +836,18 @@
             for i in range(o.sd.np):
                 dmask[o.sd.domain_id[i]] = 1
         return dmask.astype("bool")
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def count_neighbor_particles(self, ppos):
+        cdef Oct *main = self.get(ppos)
+        cdef Oct* neighbors[27]
+        self.neighbors(main, neighbors)
+        cdef int i, ni, dl, tnp
+        tnp = 0
+        for i in range(27):
+            if neighbors[i].sd != NULL:
+                tnp += neighbors[i].sd.np
+        return tnp
+

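The neighbors() routine visits the 3x3x3 stencil around an oct and wraps
indices periodically at the domain edge; indices at a given level run over
[0, nn << level). The wrap branch in pure Python (nn and level are made-up
values):

def wrap(npos, nn, level):
    n = nn << level            # octs per axis at this level
    if npos == -1:             # stepped off the low edge
        return n - 1
    elif npos == n:            # stepped off the high edge
        return 0
    return npos

nn, level = 2, 1               # 4 octs per axis at level 1
print(wrap(-1, nn, level))     # 3
print(wrap(4, nn, level))      # 0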

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/geometry/oct_geometry_handler.py
--- a/yt/geometry/oct_geometry_handler.py
+++ b/yt/geometry/oct_geometry_handler.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 import string, re, gc, time, cPickle
 import weakref
 


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1,4 +1,4 @@
-"""
+""" 
 Geometry selection routines.
 
 Author: Matthew Turk <matthewturk at gmail.com>
@@ -724,6 +724,66 @@
 
 ortho_ray_selector = OrthoRaySelector
 
+cdef class RaySelector(SelectorObject):
+
+    cdef np.float64_t p1[3]
+    cdef np.float64_t p2[3]
+    cdef np.float64_t vec[3]
+
+    def __init__(self, dobj):
+        cdef int i
+        for i in range(3):
+            self.vec[i] = dobj.vec[i]
+        if (0.0 <= self.vec[0]) and (0.0 <= self.vec[1]) and (0.0 <= self.vec[2]):
+            for i in range(3):
+                self.p1[i] = dobj.start_point[i]
+                self.p2[i] = dobj.end_point[i]
+        else:
+            for i in range(3):
+                self.p1[i] = dobj.end_point[i]
+                self.p2[i] = dobj.start_point[i]
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_grid(self, np.float64_t left_edge[3],
+                               np.float64_t right_edge[3]) nogil:
+        if ((self.p1[0] <= left_edge[0]) and (self.p2[0] > right_edge[0]) and 
+            (self.p1[1] <= left_edge[1]) and (self.p2[1] > right_edge[1]) and 
+            (self.p1[2] <= left_edge[2]) and (self.p2[2] > right_edge[2])):
+            return 1
+        return 0
+
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3],
+                         int eterm[3]) nogil:
+        if ((self.p1[0] <= pos[0] - 0.5*dds[0]) and 
+            (self.p2[0] >  pos[0] + 0.5*dds[0]) and 
+            (self.p1[1] <= pos[1] - 0.5*dds[1]) and 
+            (self.p2[1] >  pos[1] + 0.5*dds[1]) and 
+            (self.p1[2] <= pos[2] - 0.5*dds[2]) and 
+            (self.p2[2] >  pos[2] + 0.5*dds[2])):
+            return 1
+        return 0
+
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void set_bounds(self,
+                         np.float64_t left_edge[3], np.float64_t right_edge[3],
+                         np.float64_t dds[3], int ind[3][2], int *check):
+        cdef int i
+        for i in range(3):
+            ind[i][0] = <int> ((self.p1[i] - left_edge[i])/dds[i])
+            ind[i][1] = ind[i][0] + 1
+        check[0] = 0
+
+ray_selector = RaySelector
+
 cdef class GridCollectionSelector(SelectorObject):
 
     def __init__(self, dobj):

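select_cell in the new RaySelector checks, axis by axis, that the cell
inflated to its half-width sits strictly between the sorted endpoints p1 and
p2. The one-axis arithmetic as a plain Python sketch (positions are invented):

def cell_selected(p1, p2, pos, dds):
    # cell center pos, cell width dds; both faces must lie inside [p1, p2)
    return (p1 <= pos - 0.5 * dds) and (p2 > pos + 0.5 * dds)

print(cell_selected(0.0, 1.0, 0.5, 0.25))    # True
print(cell_selected(0.0, 1.0, 0.95, 0.25))   # False: right face past p2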

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -9,13 +9,19 @@
     config = Configuration('geometry',parent_package,top_path)
     config.add_extension("oct_container", 
                 ["yt/geometry/oct_container.pyx"],
-                libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+                libraries=["m"],
+                depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/geometry/oct_container.pxd",
+                         "yt/geometry/selection_routines.pxd"])
     config.add_extension("selection_routines", 
                 ["yt/geometry/selection_routines.pyx"],
                 extra_compile_args=['-fopenmp'],
                 extra_link_args=['-fopenmp'],
                 include_dirs=["yt/utilities/lib/"],
-                libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+                libraries=["m"],
+                depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/geometry/oct_container.pxd",
+                         "yt/geometry/selection_routines.pxd"])
     config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/gui/opengl_widgets/mip_viewer.py
--- a/yt/gui/opengl_widgets/mip_viewer.py
+++ b/yt/gui/opengl_widgets/mip_viewer.py
@@ -31,7 +31,7 @@
 import OpenGL.GL.ARB.framebuffer_object as GL_fbo
 import Image
 import glob
-import numpy as na
+import numpy as np
 import time
 
 from small_apps import ViewHandler3D, GenericGLUTScene
@@ -85,8 +85,8 @@
                     yield s[v][i]
 
     def _get_texture_vertices(self):
-        vs = [na.zeros(3, dtype='float32'),
-              na.ones(3, dtype='float32')]
+        vs = [np.zeros(3, dtype='float32'),
+              np.ones(3, dtype='float32')]
         #vs.reverse()
         for b in self.hv.bricks:
             shape = b.my_data[0].shape
@@ -126,7 +126,7 @@
 
         DW = self.hv.pf.domain_right_edge - self.hv.pf.domain_left_edge
         dds = ((brick.RightEdge - brick.LeftEdge) /
-               (na.array([ix,iy,iz], dtype='float32')-1)) / DW
+               (np.array([ix,iy,iz], dtype='float32')-1)) / DW
         BLE = brick.LeftEdge / DW - 0.5
         self._brick_textures.append(
             (id_field, (ix-1,iy-1,iz-1), dds, BLE))
@@ -135,7 +135,7 @@
 
     def _setup_colormap(self):
 
-        buffer = na.mgrid[0.0:1.0:256j]
+        buffer = np.mgrid[0.0:1.0:256j]
         colors = map_to_colors(buffer, "algae")
         
         GL.glActiveTexture(GL.GL_TEXTURE1)
@@ -165,17 +165,17 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(hv.bricks) * 6 * 4
-        self.v = na.fromiter(self._get_brick_vertices(offset),
+        self.v = np.fromiter(self._get_brick_vertices(offset),
                              dtype = 'float32', count = num * 3)
         self.vertices = vbo.VBO(self.v)
 
-        self.t = na.fromiter(self._get_texture_vertices(),
+        self.t = np.fromiter(self._get_texture_vertices(),
                              dtype = 'float32', count = num * 3)
         self.tvertices = vbo.VBO(self.t)
 
         self.ng = len(hv.bricks)
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float') + 30
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float') + 30
         self.position[2] = -2 # Offset backwards a bit
 
         self._setup_bricks()
@@ -373,8 +373,8 @@
 
     def reset_view(self):   
         print "RESETTING"
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float') + 30
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float') + 30
         self.position[2] = -2 # Offset backwards a bit
 
     def translate(self, axis, value):


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/gui/opengl_widgets/small_apps.py
--- a/yt/gui/opengl_widgets/small_apps.py
+++ b/yt/gui/opengl_widgets/small_apps.py
@@ -30,7 +30,7 @@
 from OpenGL.arrays import vbo, ArrayDatatype
 import Image
 import glob
-import numpy as na
+import numpy as np
 import time
 
 ESCAPE = '\033'
@@ -235,7 +235,7 @@
 
     @classmethod
     def from_image_file(cls, fn, tex_unit = GL.GL_TEXTURE0):
-        buffer = na.array(Image.open(fn))
+        buffer = np.array(Image.open(fn))
         print "Uploading buffer", buffer.min(), buffer.max(), buffer.shape, buffer.dtype
         obj = cls(tex_unit)
         obj.upload_image(buffer)
@@ -260,8 +260,8 @@
     @classmethod
     def from_image_files(cls, left_fn, right_fn, tex_unit = GL.GL_TEXTURE0):
         print "Uploading pairs from %s and %s" % (left_fn, right_fn)
-        left_buffer = na.array(Image.open(left_fn))
-        right_buffer = na.array(Image.open(right_fn))
+        left_buffer = np.array(Image.open(left_fn))
+        right_buffer = np.array(Image.open(right_fn))
         obj = cls(tex_unit)
         obj.left_image.upload_image(left_buffer)
         obj.right_image.upload_image(right_buffer)
@@ -294,7 +294,7 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(pf.h.grids) * 6 * 4
-        self.v = na.fromiter(self._get_grid_vertices(offset),
+        self.v = np.fromiter(self._get_grid_vertices(offset),
                              dtype = 'float32', count = num * 3)
 
         self.vertices = vbo.VBO(self.v)
@@ -408,7 +408,7 @@
 
         GL.glActiveTexture(GL.GL_TEXTURE0)
         id_field = GL.glGenTextures(1)
-        upload = na.log10(grid["Density"].astype("float32")).copy()
+        upload = np.log10(grid["Density"].astype("float32")).copy()
         self.mi = min(upload.min(), self.mi)
         self.ma = max(upload.max(), self.ma)
         #upload = (255*(upload - -31.0) / (-25.0 - -31.0)).astype("uint8")
@@ -452,13 +452,13 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(pf.h.grids) * 6 * 4
-        self.v = na.fromiter(self._get_grid_vertices(offset),
+        self.v = np.fromiter(self._get_grid_vertices(offset),
                              dtype = 'float32', count = num * 3)
 
         self.vertices = vbo.VBO(self.v)
         self.ng = len(pf.h.grids)
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float')
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float')
         self.position[2] = -2 # Offset backwards a bit
 
         self._setup_grids()


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/gui/reason/bottle_mods.py
--- a/yt/gui/reason/bottle_mods.py
+++ b/yt/gui/reason/bottle_mods.py
@@ -29,7 +29,7 @@
 import logging, threading
 import sys
 import urllib, urllib2
-import numpy as na
+import numpy as np
 
 from yt.utilities.bottle import \
     server_names, debug, route, run, request, ServerAdapter, response
@@ -134,7 +134,7 @@
         bp['binary'] = []
         for bkey in bkeys:
             bdata = bp.pop(bkey) # Get the binary data
-            if isinstance(bdata, na.ndarray):
+            if isinstance(bdata, np.ndarray):
                 bdata = bdata.tostring()
             bpserver = BinaryDelivery(bdata, bkey)
             self.binary_payloads.append(bpserver)


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -30,7 +30,7 @@
 import cStringIO
 import logging
 import uuid
-import numpy as na
+import numpy as np
 import time
 import urllib
 import urllib2


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/gui/reason/html/app/controller/Notebook.js
--- a/yt/gui/reason/html/app/controller/Notebook.js
+++ b/yt/gui/reason/html/app/controller/Notebook.js
@@ -73,9 +73,11 @@
     },
 
     addRequest: function(request_id, command) {
+        /*console.log("Adding request " + request_id);*/
         this.getRequestsStore().add({
             request_id: request_id, command: command,
         });
+        reason.pending.update([this.getRequestsStore().count()]);
     },
 
     addCell: function(cell) {
@@ -85,6 +87,7 @@
             var ind = this.getRequestsStore().find(
                 'request_id', cell['result_id']);
             if (ind != -1) {
+                /*console.log("Removing request " + cell['result_id']);*/
                 var rec = this.getRequestsStore().removeAt(ind);
             }
             reason.pending.update([this.getRequestsStore().count()]);


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/gui/reason/pannable_map.py
--- a/yt/gui/reason/pannable_map.py
+++ b/yt/gui/reason/pannable_map.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import os
-import numpy as na
+import numpy as np
 import zipfile
 import sys
 
@@ -92,9 +92,9 @@
                                     dd*DW[0] / (64*256),
                                     dd*DW[0])
         if self.pf.field_info[self.field].take_log:
-            cmi = na.log10(cmi)
-            cma = na.log10(cma)
-            to_plot = apply_colormap(na.log10(frb[self.field]), color_bounds = (cmi, cma))
+            cmi = np.log10(cmi)
+            cma = np.log10(cma)
+            to_plot = apply_colormap(np.log10(frb[self.field]), color_bounds = (cmi, cma))
         else:
             to_plot = apply_colormap(frb[self.field], color_bounds = (cmi, cma))
         rv = write_png_to_string(to_plot)


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/gui/reason/pyro_queue.py
--- a/yt/gui/reason/pyro_queue.py
+++ b/yt/gui/reason/pyro_queue.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/gui/reason/widget_builders.py
--- a/yt/gui/reason/widget_builders.py
+++ b/yt/gui/reason/widget_builders.py
@@ -35,7 +35,7 @@
         self._tf = tf
 
         self.center = self.pf.domain_center
-        self.normal_vector = na.array([0.7,1.0,0.3])
+        self.normal_vector = np.array([0.7,1.0,0.3])
         self.north_vector = [0.,0.,1.]
         self.steady_north = True
         self.fields = ['Density']
@@ -54,7 +54,7 @@
             roi = self.pf.h.region(self.center, self.center-self.width, self.center+self.width)
             self.mi, self.ma = roi.quantities['Extrema'](self.fields[0])[0]
             if self.log_fields[0]:
-                self.mi, self.ma = na.log10(self.mi), na.log10(self.ma)
+                self.mi, self.ma = np.log10(self.mi), np.log10(self.ma)
 
         self._tf = ColorTransferFunction((self.mi-2, self.ma+2), nbins=nbins)
 
@@ -87,10 +87,10 @@
     dd = pf.h.all_data()
     if value is None or rel_val:
         if value is None: value = 0.5
-        mi, ma = na.log10(dd.quantities["Extrema"]("Density")[0])
+        mi, ma = np.log10(dd.quantities["Extrema"]("Density")[0])
         value = 10.0**(value*(ma - mi) + mi)
     vert = dd.extract_isocontours("Density", value)
-    na.multiply(vert, 100, vert)
+    np.multiply(vert, 100, vert)
     return vert
 
 def get_streamlines(pf):


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/gui/reason/widget_store.py
--- a/yt/gui/reason/widget_store.py
+++ b/yt/gui/reason/widget_store.py
@@ -70,7 +70,7 @@
         if onmax: 
             center = pf.h.find_max('Density')[1]
         else:
-            center = na.array(center)
+            center = np.array(center)
         axis = inv_axis_names[axis.lower()]
         coord = center[axis]
         sl = pf.h.slice(axis, coord, center = center)
@@ -203,7 +203,7 @@
     def _pf_info(self):
         tr = {}
         for k, v in self.pf._mrep._attrs.items():
-            if isinstance(v, na.ndarray):
+            if isinstance(v, np.ndarray):
                 tr[k] = v.tolist()
             else:
                 tr[k] = v
@@ -237,9 +237,9 @@
     def deliver_isocontour(self, field, value, rel_val = False):
         ph = PayloadHandler()
         vert = get_isocontour(self.pf, field, value, rel_val)
-        normals = na.empty(vert.shape)
+        normals = np.empty(vert.shape)
         for i in xrange(vert.shape[0]/3):
-            n = na.cross(vert[i*3,:], vert[i*3+1,:])
+            n = np.cross(vert[i*3,:], vert[i*3+1,:])
             normals[i*3:i*3+3,:] = n[None,:]
         ph.widget_payload(self, {'ptype':'isocontour',
                                  'binary': ['vert', 'normals'],
@@ -260,20 +260,20 @@
         # Assume that path comes in as a list of matrices
         # Assume original vector is (0., 0., 1.), up is (0., 1., 0.)
         
-        views = [na.array(view).transpose() for view in views]
+        views = [np.array(view).transpose() for view in views]
 
-        times = na.linspace(0.0,1.0,len(times))
+        times = np.linspace(0.0,1.0,len(times))
                 
         # This is wrong.
-        reflect = na.array([[1,0,0],[0,1,0],[0,0,-1]])
+        reflect = np.array([[1,0,0],[0,1,0],[0,0,-1]])
 
-        rots = na.array([R[0:3,0:3] for R in views])
+        rots = np.array([R[0:3,0:3] for R in views])
 
-        rots = na.array([na.dot(reflect,rot) for rot in rots])
+        rots = np.array([np.dot(reflect,rot) for rot in rots])
 
-        centers = na.array([na.dot(rot,R[0:3,3]) for R,rot in zip(views,rots)])
+        centers = np.array([np.dot(rot,R[0:3,3]) for R,rot in zip(views,rots)])
 
-        ups = na.array([na.dot(rot,R[0:3,1]) for R,rot in zip(views,rots)])
+        ups = np.array([np.dot(rot,R[0:3,1]) for R,rot in zip(views,rots)])
 
         #print 'views'
         #for view in views: print view
@@ -284,12 +284,12 @@
         #print 'ups'
         #for up in ups: print up
 
-        pos = na.empty((N,3), dtype="float64")
-        uv = na.empty((N,3), dtype="float64")
-        f = na.zeros((N,3), dtype="float64")
+        pos = np.empty((N,3), dtype="float64")
+        uv = np.empty((N,3), dtype="float64")
+        f = np.zeros((N,3), dtype="float64")
         for i in range(3):
-            pos[:,i] = create_spline(times, centers[:,i], na.linspace(0.0,1.0,N))
-            uv[:,i] = create_spline(times, ups[:,i], na.linspace(0.0,1.0,N))
+            pos[:,i] = create_spline(times, centers[:,i], np.linspace(0.0,1.0,N))
+            uv[:,i] = create_spline(times, ups[:,i], np.linspace(0.0,1.0,N))
     
         path = [pos.tolist(), f.tolist(), uv.tolist()]
     


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -33,6 +33,7 @@
 # First module imports
 import sys, types, os, glob, cPickle, time
 import numpy as na # For historical reasons
+import numpy as np # For modern purposes
 import numpy # In case anyone wishes to use it by name
 
 # This next item will handle most of the actual startup procedures, but it will
@@ -52,7 +53,7 @@
 if __level >= int(ytcfgDefaults["loglevel"]):
     # This won't get displayed.
     mylog.debug("Turning off NumPy error reporting")
-    na.seterr(all = 'ignore')
+    np.seterr(all = 'ignore')
 
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
@@ -61,7 +62,7 @@
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \
-    ParticleTrajectoryCollection
+    ParticleTrajectoryCollection, ImageArray
 
 from yt.data_objects.derived_quantities import \
     add_quantity, quantity_info
@@ -95,6 +96,9 @@
 from yt.frontends.gdf.api import \
     GDFStaticOutput, GDFFieldInfo, add_gdf_field
 
+from yt.frontends.athena.api import \
+    AthenaStaticOutput, AthenaFieldInfo, add_athena_field
+
 #from yt.frontends.art.api import \
 #    ARTStaticOutput, ARTFieldInfo, add_art_field
 
@@ -118,7 +122,7 @@
     get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     callback_registry, write_bitmap, write_image, annotate_image, \
     apply_colormap, scale_image, write_projection, write_fits, \
-    SlicePlot, OffAxisSlicePlot, ProjectionPlot
+    SlicePlot, OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/testing.py
--- /dev/null
+++ b/yt/testing.py
@@ -0,0 +1,155 @@
+"""Provides utility and helper functions for testing in yt.
+
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: The University of Chicago
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Anthony Scopatz.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+from yt.funcs import *
+from numpy.testing import assert_array_equal, assert_almost_equal, \
+    assert_approx_equal, assert_array_almost_equal, assert_equal, \
+    assert_array_less, assert_string_equal
+
+def assert_rel_equal(a1, a2, decimals):
+    return assert_almost_equal(a1/a2, 1.0, decimals)
+
+def amrspace(extent, levels=7, cells=8):
+    """Creates two numpy arrays representing the left and right bounds of 
+    an AMR grid as well as an array for the AMR level of each cell.
+
+    Parameters
+    ----------
+    extent : array-like
+        This is a sequence of length 2*ndims giving the bounds of each dimension.
+        For example, the 2D unit square would be given by [0.0, 1.0, 0.0, 1.0].
+        A 3D cylindrical grid may look like [0.0, 2.0, -1.0, 1.0, 0.0, 2*np.pi].
+    levels : int or sequence of ints, optional
+        This is the number of AMR refinement levels.  If given as a sequence
+        (of length ndims), then each dimension will be refined down to its
+        own level.  All non-zero values must be the same; a zero-valued
+        dimension indicates that this dim should not be refined.  Taking the
+        3D cylindrical example above, if we don't want to refine theta but
+        want r and z refined to level 5, we would set levels=(5, 5, 0).
+    cells : int, optional
+        This is the number of cells per refinement level.
+
+    Returns
+    -------
+    left : float ndarray, shape=(npoints, ndims)
+        The left AMR grid points.
+    right : float ndarray, shape=(npoints, ndims)
+        The right AMR grid points.
+    level : int ndarray, shape=(npoints,)
+        The AMR level for each point.
+
+    Examples
+    --------
+    >>> l, r, lvl = amrspace([0.0, 2.0, 1.0, 2.0, 0.0, 3.14], levels=(3,3,0), cells=2)
+    >>> print l
+    [[ 0.     1.     0.   ]
+     [ 0.25   1.     0.   ]
+     [ 0.     1.125  0.   ]
+     [ 0.25   1.125  0.   ]
+     [ 0.5    1.     0.   ]
+     [ 0.     1.25   0.   ]
+     [ 0.5    1.25   0.   ]
+     [ 1.     1.     0.   ]
+     [ 0.     1.5    0.   ]
+     [ 1.     1.5    0.   ]]
+
+    """
+    extent = np.asarray(extent, dtype='f8')
+    dextent = extent[1::2] - extent[::2]
+    ndims = len(dextent)
+
+    if isinstance(levels, int):
+        minlvl = maxlvl = levels
+        levels = np.array([levels]*ndims, dtype='int32')
+    else:
+        levels = np.asarray(levels, dtype='int32')
+        minlvl = levels.min()
+        maxlvl = levels.max()
+        if minlvl != maxlvl and (minlvl != 0 or set([minlvl, maxlvl]) != set(levels)):
+            raise ValueError("all levels must have the same value or zero.")
+    dims_zero = (levels == 0)
+    dims_nonzero = ~dims_zero
+    ndims_nonzero = dims_nonzero.sum()
+
+    npoints = (cells**ndims_nonzero - 1)*maxlvl + 1
+    left = np.empty((npoints, ndims), dtype='float64')
+    right = np.empty((npoints, ndims), dtype='float64')
+    level = np.empty(npoints, dtype='int32')
+
+    # fill zero dims
+    left[:,dims_zero] = extent[::2][dims_zero]
+    right[:,dims_zero] = extent[1::2][dims_zero]
+
+    # fill non-zero dims
+    dcell = 1.0 / cells
+    left_slice =  tuple([slice(extent[2*n], extent[2*n+1], extent[2*n+1]) if \
+        dims_zero[n] else slice(0.0,1.0,dcell) for n in range(ndims)])
+    right_slice = tuple([slice(extent[2*n+1], extent[2*n], -extent[2*n+1]) if \
+        dims_zero[n] else slice(dcell,1.0+dcell,dcell) for n in range(ndims)])
+    left_norm_grid = np.reshape(np.mgrid[left_slice].T.flat[ndims:], (-1, ndims))
+    lng_zero = left_norm_grid[:,dims_zero]
+    lng_nonzero = left_norm_grid[:,dims_nonzero]
+
+    right_norm_grid = np.reshape(np.mgrid[right_slice].T.flat[ndims:], (-1, ndims))
+    rng_zero = right_norm_grid[:,dims_zero]
+    rng_nonzero = right_norm_grid[:,dims_nonzero]
+
+    level[0] = maxlvl
+    left[0,:] = extent[::2]
+    right[0,dims_zero] = extent[1::2][dims_zero]
+    right[0,dims_nonzero] = (dcell**maxlvl)*dextent[dims_nonzero] + extent[::2][dims_nonzero]
+    for i, lvl in enumerate(range(maxlvl, 0, -1)):
+        start = (cells**ndims_nonzero - 1)*i + 1
+        stop = (cells**ndims_nonzero - 1)*(i+1) + 1
+        dsize = dcell**(lvl-1) * dextent[dims_nonzero]
+        level[start:stop] = lvl
+        left[start:stop,dims_zero] = lng_zero
+        left[start:stop,dims_nonzero] = lng_nonzero*dsize + extent[::2][dims_nonzero]
+        right[start:stop,dims_zero] = rng_zero
+        right[start:stop,dims_nonzero] = rng_nonzero*dsize + extent[::2][dims_nonzero]
+
+    return left, right, level
+
+def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",),
+                   negative = False, nprocs = 1):
+    from yt.data_objects.api import data_object_registry
+    from yt.frontends.stream.api import load_uniform_grid
+    if not iterable(ndims):
+        ndims = [ndims, ndims, ndims]
+    else:
+        assert(len(ndims) == 3)
+    if not iterable(negative):
+        negative = [negative for f in fields]
+    assert(len(fields) == len(negative))
+    offsets = []
+    for n in negative:
+        if n:
+            offsets.append(0.5)
+        else:
+            offsets.append(0.0)
+    data = dict((field, (np.random.random(ndims) - offset) * peak_value)
+                 for field,offset in zip(fields,offsets))
+    ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
+    return ug

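Assuming yt.testing imports cleanly, the new helpers compose roughly like
this (a usage sketch, not taken from the patch):

    from yt.testing import fake_random_pf, assert_rel_equal, amrspace

    # A 16^3 fake uniform-grid dataset with a random Density field.
    pf = fake_random_pf(16, fields=("Density",))
    dd = pf.h.all_data()

    # Relative agreement to 5 decimal places.
    assert_rel_equal(dd["Density"], dd["Density"].copy(), 5)

    # AMR bounds for the 2D unit square, 3 levels, 2 cells per level.
    left, right, level = amrspace([0.0, 1.0, 0.0, 1.0], levels=3, cells=2)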

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -25,7 +25,7 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-import numpy as na
+import numpy as np
 from yt.funcs import *
 from yt.visualization.volume_rendering.grid_partitioner import HomogenizedVolume
 from yt.visualization.image_writer import write_image, write_bitmap
@@ -61,7 +61,7 @@
 def _rchild_id(id): return (id<<1) + 2
 def _parent_id(id): return (id-1)>>1
 
-steps = na.array([[-1, -1, -1],
+steps = np.array([[-1, -1, -1],
                   [-1, -1,  0],
                   [-1, -1,  1],
                   [-1,  0, -1],
@@ -319,31 +319,31 @@
         if l_max is None:
             self.l_max = self.pf.hierarchy.max_level+1
         else:
-            self.l_max = na.min([l_max,self.pf.hierarchy.max_level+1])
+            self.l_max = np.min([l_max,self.pf.hierarchy.max_level+1])
 
         if le is None:
             self.domain_left_edge = pf.domain_left_edge
         else:
-            self.domain_left_edge = na.array(le)
+            self.domain_left_edge = np.array(le)
 
         if re is None:
             self.domain_right_edge = pf.domain_right_edge
         else:
-            self.domain_right_edge = na.array(re)
+            self.domain_right_edge = np.array(re)
 
-        self.domain_left_edge = na.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
-        self.domain_right_edge = na.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_left_edge = np.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_right_edge = np.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
 
         levels = pf.hierarchy.get_levels()
         root_grids = levels.next()
         covering_grids = root_grids
-        vol_needed = na.prod(self.domain_right_edge-self.domain_left_edge)
+        vol_needed = np.prod(self.domain_right_edge-self.domain_left_edge)
 
         for i in range(self.pf.hierarchy.max_level):
-            root_l_data = na.clip(na.array([grid.LeftEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
-            root_r_data = na.clip(na.array([grid.RightEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
+            root_l_data = np.clip(np.array([grid.LeftEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
+            root_r_data = np.clip(np.array([grid.RightEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
             
-            vol = na.prod(root_r_data-root_l_data,axis=1).sum()
+            vol = np.prod(root_r_data-root_l_data,axis=1).sum()
             if vol >= vol_needed:
                 covering_grids = root_grids
                 root_grids = levels.next()
@@ -356,18 +356,18 @@
         self.domain_left_edge = ((self.domain_left_edge)/rgdds).astype('int64')*rgdds
         self.domain_right_edge = (((self.domain_right_edge)/rgdds).astype('int64')+1)*rgdds
 
-        self.domain_left_edge = na.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
-        self.domain_right_edge = na.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_left_edge = np.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_right_edge = np.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
         
         self.my_l_corner = self.domain_left_edge
         self.my_r_corner = self.domain_right_edge
 
         #mylog.info('Making kd tree from le %s to %s'% (self.domain_left_edge, self.domain_right_edge))
         
-        root_l_data = na.array([grid.LeftEdge for grid in root_grids])
-        root_r_data = na.array([grid.RightEdge for grid in root_grids])
-        root_we_want = na.all(root_l_data < self.my_r_corner,axis=1)*\
-                       na.all(root_r_data > self.my_l_corner,axis=1)
+        root_l_data = np.array([grid.LeftEdge for grid in root_grids])
+        root_r_data = np.array([grid.RightEdge for grid in root_grids])
+        root_we_want = np.all(root_l_data < self.my_r_corner,axis=1)*\
+                       np.all(root_r_data > self.my_l_corner,axis=1)
         
         root_grids = root_grids[root_we_want]
 
@@ -550,7 +550,7 @@
         center cell (i,j,k) is omitted.
         
         """
-        position = na.array(position)
+        position = np.array(position)
         grid = self.locate_brick(position).grid
         ci = ((position-grid.LeftEdge)/grid.dds).astype('int64')
         return self.locate_neighbors(grid,ci)
@@ -583,20 +583,20 @@
         center cell (i,j,k) is ommitted.
         
         """
-        ci = na.array(ci)
+        ci = np.array(ci)
         center_dds = grid.dds
-        position = grid.LeftEdge + (na.array(ci)+0.5)*grid.dds
-        grids = na.empty(26, dtype='object')
-        cis = na.empty([26,3], dtype='int64')
+        position = grid.LeftEdge + (np.array(ci)+0.5)*grid.dds
+        grids = np.empty(26, dtype='object')
+        cis = np.empty([26,3], dtype='int64')
         offs = 0.5*(center_dds + self.sdx)
 
         new_cis = ci + steps
-        in_grid = na.all((new_cis >=0)*
+        in_grid = np.all((new_cis >=0)*
                          (new_cis < grid.ActiveDimensions),axis=1)
         new_positions = position + steps*offs
         grids[in_grid] = grid
                 
-        get_them = na.argwhere(in_grid != True).ravel()
+        get_them = np.argwhere(in_grid != True).ravel()
         cis[in_grid] = new_cis[in_grid]
 
         if (in_grid != True).sum()>0:
@@ -668,7 +668,7 @@
                     dds = []
                     for i,field in enumerate(self.fields):
                         vcd = current_node.grid.get_vertex_centered_data(field,smoothed=True,no_ghost=self.no_ghost).astype('float64')
-                        if self.log_fields[i]: vcd = na.log10(vcd)
+                        if self.log_fields[i]: vcd = np.log10(vcd)
                         dds.append(vcd)
                     current_saved_grids.append(current_node.grid)
                     current_vcds.append(dds)
@@ -677,7 +677,7 @@
                           current_node.li[1]:current_node.ri[1]+1,
                           current_node.li[2]:current_node.ri[2]+1].copy() for d in dds]
                 
-                if na.any(current_node.r_corner-current_node.l_corner == 0):
+                if np.any(current_node.r_corner-current_node.l_corner == 0):
                     current_node.brick = None
                 else:
                     current_node.brick = PartitionedGrid(current_node.grid.id, data,
@@ -686,8 +686,8 @@
                                                          current_node.dims.astype('int64'))
                 self.bricks.append(current_node.brick)
                 self.brick_dimensions.append(current_node.dims)
-        self.bricks = na.array(self.bricks)
-        self.brick_dimensions = na.array(self.brick_dimensions)
+        self.bricks = np.array(self.bricks)
+        self.brick_dimensions = np.array(self.brick_dimensions)
         del current_saved_grids, current_vcds
         self.bricks_loaded = True
 
@@ -701,7 +701,7 @@
             dds = []
             for i,field in enumerate(self.fields):
                 vcd = current_node.grid.get_vertex_centered_data(field,smoothed=True,no_ghost=self.no_ghost).astype('float64')
-                if self.log_fields[i]: vcd = na.log10(vcd)
+                if self.log_fields[i]: vcd = np.log10(vcd)
                 dds.append(vcd)
                 self.current_saved_grids.append(current_node.grid)
                 self.current_vcds.append(dds)
@@ -734,14 +734,14 @@
         dds = thisnode.grid.dds
         gle = thisnode.grid.LeftEdge
         gre = thisnode.grid.RightEdge
-        thisnode.li = na.rint((thisnode.l_corner-gle)/dds).astype('int32')
-        thisnode.ri = na.rint((thisnode.r_corner-gle)/dds).astype('int32')
+        thisnode.li = np.rint((thisnode.l_corner-gle)/dds).astype('int32')
+        thisnode.ri = np.rint((thisnode.r_corner-gle)/dds).astype('int32')
         thisnode.dims = (thisnode.ri - thisnode.li).astype('int32')
         # Here the cost is actually inversely proportional to 4**Level (empirical)
-        #thisnode.cost = (na.prod(thisnode.dims)/4.**thisnode.grid.Level).astype('int64')
+        #thisnode.cost = (np.prod(thisnode.dims)/4.**thisnode.grid.Level).astype('int64')
         thisnode.cost = 1.0
         # Here is the old way
-        # thisnode.cost = na.prod(thisnode.dims).astype('int64')
+        # thisnode.cost = np.prod(thisnode.dims).astype('int64')
 
     def initialize_leafs(self):
         for node in self.depth_traverse():
@@ -754,7 +754,7 @@
         self.rebuild_references()
                 
     def trim_references(self):
-        par_tree_depth = long(na.log2(self.comm.size))
+        par_tree_depth = long(np.log2(self.comm.size))
         for i in range(2**self.comm.size):
             if ((i + 1)>>par_tree_depth) == 1:
                 # There are self.comm.size nodes that meet this criteria
@@ -767,7 +767,7 @@
                 del node.grids
             except:
                 pass
-            if not na.isreal(node.grid):
+            if not np.isreal(node.grid):
                 node.grid = node.grid.id
         if self.tree_dict[0].split_pos is None:
             self.tree_dict.pop(0)
@@ -942,7 +942,7 @@
         v = 0.0
         for node in self.depth_traverse():
             if node.grid is not None:
-                v += na.prod(node.r_corner - node.l_corner)
+                v += np.prod(node.r_corner - node.l_corner)
         return v
 
     def count_cells(self):
@@ -957,10 +957,10 @@
         Total volume of the tree.
         
         """
-        c = na.int64(0)
+        c = np.int64(0)
         for node in self.depth_traverse():
             if node.grid is not None:
-                c += na.prod(node.ri - node.li).astype('int64')
+                c += np.prod(node.ri - node.li).astype('int64')
         return c
 
     def _build(self, grids, parent, l_corner, r_corner):
@@ -994,15 +994,15 @@
         current_node.r_corner = r_corner
         # current_node.owner = self.comm.rank
         current_node.id = 0
-        par_tree_depth = int(na.log2(self.comm.size))
+        par_tree_depth = int(np.log2(self.comm.size))
         anprocs = 2**par_tree_depth
 
         volume_partitioned = 0.0
-        pbar = get_pbar("Building kd-Tree",
-                na.prod(self.domain_right_edge-self.domain_left_edge))
+        total_vol = np.prod(self.domain_right_edge-self.domain_left_edge)
+        pbar = get_pbar("Building kd-Tree", total_vol)
 
         while current_node is not None:
-            pbar.update(volume_partitioned)
+            pbar.update(min(volume_partitioned, total_vol))
 
             # If we don't have any grids, that means we are revisiting
             # a dividing node, and there is nothing to be done.
@@ -1034,12 +1034,12 @@
                     if len(thisgrid.Children) > 0 and thisgrid.Level < self.l_max:
                         # Get the children that are actually in the current volume
                         children = [child.id - self._id_offset for child in thisgrid.Children  
-                                    if na.all(child.LeftEdge < current_node.r_corner) & 
-                                    na.all(child.RightEdge > current_node.l_corner)]
+                                    if np.all(child.LeftEdge < current_node.r_corner) & 
+                                    np.all(child.RightEdge > current_node.l_corner)]
 
                         # If we have children, get all the new grids, and keep building the tree
                         if len(children) > 0:
-                            current_node.grids = self.pf.hierarchy.grids[na.array(children,copy=False)]
+                            current_node.grids = self.pf.hierarchy.grids[np.array(children,copy=False)]
                             current_node.parent_grid = thisgrid
                             #print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
                             del children
@@ -1048,7 +1048,7 @@
                     # Else make a leaf node (brick container)
                     #print 'My single grid covers the rest of the volume, and I have no children', thisgrid
                     set_leaf(current_node, thisgrid, current_node.l_corner, current_node.r_corner)
-                    volume_partitioned += na.prod(current_node.r_corner-current_node.l_corner)
+                    volume_partitioned += np.prod(current_node.r_corner-current_node.l_corner)
                     # print 'My single grid covers the rest of the volume, and I have no children'
                     current_node, previous_node = self.step_depth(current_node, previous_node)
                     continue
@@ -1078,7 +1078,7 @@
         # For some reason doing dim 0 separately is slightly faster.
         # This could be rewritten to all be in the loop below.
 
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        data = np.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
         return data[:,:,best_dim], best_dim, split, less_ids, greater_ids
@@ -1089,7 +1089,7 @@
         left and right children.
         '''
 
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        data = np.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
 
@@ -1106,8 +1106,8 @@
         current_node.split_pos = split
         #less_ids0 = (data[:,0] < split)
         #greater_ids0 = (split < data[:,1])
-        #assert(na.all(less_ids0 == less_ids))
-        #assert(na.all(greater_ids0 == greater_ids))
+        #assert(np.all(less_ids0 == less_ids))
+        #assert(np.all(greater_ids0 == greater_ids))
 
         current_node.left_child = MasterNode(my_id=_lchild_id(current_node.id),
                                              parent=current_node,
@@ -1143,7 +1143,7 @@
             Position of the back center from which to start moving forward.
         front_center: array_like
             Position of the front center to which the traversal progresses.
-        image: na.array
+        image: np.array
             Image plane to contain resulting ray cast.
 
         Returns
@@ -1176,12 +1176,12 @@
     def reduce_tree_images(self, tree, viewpoint, image=None):
         if image is not None:
             self.image = image
-        rounds = int(na.log2(self.comm.size))
+        rounds = int(np.log2(self.comm.size))
         anprocs = 2**rounds
         my_node = tree
         my_node_id = 0
         my_node.owner = 0
-        path = na.binary_repr(anprocs+self.comm.rank)
+        path = np.binary_repr(anprocs+self.comm.rank)
         for i in range(rounds):
             try:
                 my_node.left_child.owner = my_node.owner
@@ -1215,7 +1215,7 @@
                     mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
                     arr2 = self.comm.recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
-                    ta = 1.0 - na.sum(self.image,axis=2)
+                    ta = 1.0 - np.sum(self.image,axis=2)
                     ta[ta<0.0] = 0.0
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1237,8 +1237,8 @@
                     mylog.debug('%04i receiving image from %04i'%(self.comm.rank,front.owner))
                     arr2 = self.comm.recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
-                    #ta = na.exp(-na.sum(arr2,axis=2))
-                    ta = 1.0 - na.sum(arr2, axis=2)
+                    #ta = np.exp(-np.sum(arr2,axis=2))
+                    ta = 1.0 - np.sum(arr2, axis=2)
                     ta[ta<0.0] = 0.0
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1292,8 +1292,8 @@
                     self.bricks.append(node.brick)
                     self.brick_dimensions.append(node.dims)
 
-            self.bricks = na.array(self.bricks)
-            self.brick_dimensions = na.array(self.brick_dimensions)
+            self.bricks = np.array(self.bricks)
+            self.brick_dimensions = np.array(self.brick_dimensions)
 
             self.bricks_loaded=True
             f.close()
@@ -1333,12 +1333,12 @@
         raise NotImplementedError()
         f = h5py.File(fn,"w")
         Nkd = len(self.tree)
-        kd_l_corners = na.zeros( (Nkd, 3), dtype='float64')
-        kd_r_corners = na.zeros( (Nkd, 3), dtype='float64')
-        kd_grids = na.zeros( (Nkd) )
-        kd_split_axs = na.zeros( (Nkd), dtype='int32')
-        kd_split_pos = na.zeros( (Nkd), dtype='float64')
-        kd_owners = na.zeros( (Nkd), dtype='int32')
+        kd_l_corners = np.zeros( (Nkd, 3), dtype='float64')
+        kd_r_corners = np.zeros( (Nkd, 3), dtype='float64')
+        kd_grids = np.zeros( (Nkd) )
+        kd_split_axs = np.zeros( (Nkd), dtype='int32')
+        kd_split_pos = np.zeros( (Nkd), dtype='float64')
+        kd_owners = np.zeros( (Nkd), dtype='int32')
         f.create_group("/bricks")
         for i, tree_item in enumerate(self.tree.iteritems()):
             kdid = tree_item[0]
@@ -1369,17 +1369,17 @@
         f.close()
         
     def corners_to_line(self,lc, rc):
-        x = na.array([ lc[0], lc[0], lc[0], lc[0], lc[0],
+        x = np.array([ lc[0], lc[0], lc[0], lc[0], lc[0],
                        rc[0], rc[0], rc[0], rc[0], rc[0],
                        rc[0], lc[0], lc[0], rc[0],
                        rc[0], lc[0], lc[0] ])
         
-        y = na.array([ lc[1], lc[1], rc[1], rc[1], lc[1],
+        y = np.array([ lc[1], lc[1], rc[1], rc[1], lc[1],
                        lc[1], lc[1], rc[1], rc[1], lc[1],
                        lc[1], lc[1], rc[1], rc[1],
                        rc[1], rc[1], lc[1] ])
         
-        z = na.array([ lc[2], rc[2], rc[2], lc[2], lc[2],
+        z = np.array([ lc[2], rc[2], rc[2], lc[2], lc[2],
                        lc[2], rc[2], rc[2], lc[2], lc[2],
                        rc[2], rc[2], rc[2], rc[2],
                        lc[2], lc[2], lc[2] ])

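The image reduction above composites a front image over a back image using
the leftover transmission 1 - sum(image, axis=2); the hunk elides the
per-channel update itself, so the body below is an assumption about that
step, not code from the patch:

    import numpy as np

    def composite_over(front, back):
        # Remaining transmission of the front image scales the back image
        # channel by channel, clamped so opacity never exceeds one.
        ta = 1.0 - front.sum(axis=2)
        ta[ta < 0.0] = 0.0
        out = front.copy()
        for i in range(3):
            out[:, :, i] += ta * back[:, :, i]
        return out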

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -57,3 +57,8 @@
     TestBooleanANDParticleQuantity, \
     TestBooleanORParticleQuantity, \
     TestBooleanNOTParticleQuantity
+
+try:
+    from .framework import AnswerTesting
+except ImportError:
+    raise


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/answer_testing/default_tests.py
--- a/yt/utilities/answer_testing/default_tests.py
+++ b/yt/utilities/answer_testing/default_tests.py
@@ -67,3 +67,4 @@
         for field in sorted(self.result):
             for p1, p2 in zip(self.result[field], old_result[field]):
                 self.compare_data_arrays(p1, p2, self.tolerance)
+


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/answer_testing/framework.py
--- /dev/null
+++ b/yt/utilities/answer_testing/framework.py
@@ -0,0 +1,335 @@
+"""
+Answer Testing using Nose as a starting point
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import logging
+import os
+import hashlib
+import contextlib
+import urllib2
+
+from yt.testing import *
+from yt.utilities.command_line import get_yt_version
+from yt.config import ytcfg
+from yt.utilities.logger import \
+    disable_stream_logging
+from nose.plugins import Plugin
+from yt.mods import *
+import cPickle
+
+mylog = logging.getLogger('nose.plugins.answer-testing')
+
+_latest = "SomeValue"
+_url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
+
+class AnswerTestOpener(object):
+    def __init__(self, reference_name):
+        self.reference_name = reference_name
+        self.cache = {}
+
+    def get(self, pf_name, default = None):
+        if pf_name in self.cache: return self.cache[pf_name]
+        url = _url_path % (self.reference_name, pf_name)
+        try:
+            resp = urllib2.urlopen(url)
+            # This is dangerous, but we have a controlled S3 environment
+            data = resp.read()
+            rv = cPickle.loads(data)
+        except urllib2.HTTPError as ex:
+            mylog.warning("Missing %s (%s)", url, ex)
+            raise YTNoOldAnswer(url)
+        self.cache[pf_name] = rv
+        return rv
+
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=None, help="The name against which we will compare")
+        parser.add_option("--answer-name", dest="this_name",
+            default=my_hash,
+            help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+        disable_stream_logging()
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
+        if options.compare_name is not None:
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
+
+    def finalize(self, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+@contextlib.contextmanager
+def temp_cwd(cwd):
+    oldcwd = os.getcwd()
+    os.chdir(cwd)
+    try:
+        yield
+    finally:
+        os.chdir(oldcwd)
+
+def can_run_pf(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        try:
+            load(pf_fn)
+        except:
+            return False
+    return AnswerTestingTest.result_storage is not None
+
+def data_dir_load(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        pf = load(pf_fn)
+        pf.h
+        return pf
+
+class AnswerTestingTest(object):
+    reference_storage = None
+
+    description = None
+    def __init__(self, pf_fn):
+        self.pf = data_dir_load(pf_fn)
+
+    def __call__(self):
+        nv = self.run()
+        if self.reference_storage is not None:
+            dd = self.reference_storage.get(str(self.pf))
+            if dd is None: raise YTNoOldAnswer()
+            ov = dd[self.name]
+            self.compare(nv, ov)
+        else:
+            ov = None
+        self.result_storage[str(self.pf)][self.name] = nv
+
+    def compare(self, new_result, old_result):
+        raise RuntimeError
+
+    def create_obj(self, pf, obj_type):
+        # obj_type should be tuple of
+        #  ( obj_name, ( args ) )
+        if obj_type is None:
+            return pf.h.all_data()
+        cls = getattr(pf.h, obj_type[0])
+        obj = cls(*obj_type[1])
+        return obj
+
+    @property
+    def sim_center(self):
+        """
+        This returns the center of the domain.
+        """
+        return 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+
+    @property
+    def max_dens_location(self):
+        """
+        This is a helper function to return the location of the most dense
+        point.
+        """
+        return self.pf.h.find_max("Density")[1]
+
+    @property
+    def entire_simulation(self):
+        """
+        Return an unsorted array of values that cover the entire domain.
+        """
+        return self.pf.h.all_data()
+
+    @property
+    def name(self):
+        obj_type = getattr(self, "obj_type", None)
+        if obj_type is None:
+            oname = "all"
+        else:
+            oname = "_".join((str(s) for s in obj_type))
+        args = [self._type_name, str(self.pf), oname]
+        args += [str(getattr(self, an)) for an in self._attrs]
+        return "_".join(args)
+        
+class FieldValuesTest(AnswerTestingTest):
+    _type_name = "FieldValues"
+    _attrs = ("field", )
+
+    def __init__(self, pf_fn, field, obj_type = None):
+        super(FieldValuesTest, self).__init__(pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        return obj[self.field]
+
+    def compare(self, new_result, old_result):
+        assert_equal(new_result, old_result)
+
+class ProjectionValuesTest(AnswerTestingTest):
+    _type_name = "ProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(ProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        return proj.field_data
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class GridValuesTest(AnswerTestingTest):
+    _type_name = "GridValues"
+    _attrs = ("field",)
+
+    def __init__(self, pf_fn, field):
+        super(GridValuesTest, self).__init__(pf_fn)
+        self.field = field
+
+    def run(self):
+        hashes = {}
+        for g in self.pf.h.grids:
+            hashes[g.id] = hashlib.md5(g[self.field].tostring()).hexdigest()
+            g.clear_data()
+        return hashes
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class GridHierarchyTest(AnswerTestingTest):
+    _type_name = "GridHierarchy"
+    _attrs = ()
+
+    def run(self):
+        result = {}
+        result["grid_dimensions"] = self.pf.h.grid_dimensions
+        result["grid_left_edges"] = self.pf.h.grid_left_edge
+        result["grid_right_edges"] = self.pf.h.grid_right_edge
+        result["grid_levels"] = self.pf.h.grid_levels
+        result["grid_particle_count"] = self.pf.h.grid_particle_count
+
+    def compare(self, new_result, old_result):
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class ParentageRelationshipsTest(AnswerTestingTest):
+    _type_name = "ParentageRelationships"
+    _attrs = ()
+    def run(self):
+        result = {}
+        result["parents"] = []
+        result["children"] = []
+        for g in self.pf.h.grids:
+            p = g.Parent
+            if p is None:
+                result["parents"].append(None)
+            elif hasattr(p, "id"):
+                result["parents"].append(p.id)
+            else:
+                result["parents"].append([pg.id for pg in p])
+            result["children"].append([c.id for c in g.Children])
+        return result
+
+    def compare(self, new_result, old_result):
+        for newp, oldp in zip(new_result["parents"], old_result["parents"]):
+            assert(newp == oldp)
+        for newc, oldc in zip(new_result["children"], old_result["children"]):
+            assert(newc == oldc)
+
+def requires_pf(pf_fn):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    if not can_run_pf(pf_fn):
+        return ffalse
+    else:
+        return ftrue
+
+def standard_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)

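A sketch of how a test module might drive the new framework; the dataset
path "MyAMRRun/data0042" is hypothetical and would need to live under the
test_data_dir entry of the yt configuration:

    from yt.utilities.answer_testing.framework import \
        requires_pf, standard_patch_amr

    pf_fn = "MyAMRRun/data0042"

    @requires_pf(pf_fn)
    def test_my_amr_run():
        for test in standard_patch_amr(pf_fn, ["Density"]):
            yield test

The plugin is enabled through nose's standard switch derived from its name,
i.e. --with-answer-testing, with --answer-store to upload results and
--answer-compare=<name> to check against a stored set.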

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/answer_testing/hydro_tests.py
--- a/yt/utilities/answer_testing/hydro_tests.py
+++ b/yt/utilities/answer_testing/hydro_tests.py
@@ -99,11 +99,11 @@
     field = None
 
     def run(self):
-        na.random.seed(4333)
-        start_point = na.random.random(self.pf.dimensionality) * \
+        np.random.seed(4333)
+        start_point = np.random.random(self.pf.dimensionality) * \
             (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
             self.pf.domain_left_edge
-        end_point   = na.random.random(self.pf.dimensionality) * \
+        end_point   = np.random.random(self.pf.dimensionality) * \
             (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
             self.pf.domain_left_edge
 


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -29,14 +29,12 @@
 # We first create our dictionary of tests to run.  This starts out empty, and
 # as tests are imported it will be filled.
 if "TestRegistry" not in locals():
-    print "Initializing TestRegistry"
     class TestRegistry(dict):
         def __new__(cls, *p, **k):
             if not '_the_instance' in cls.__dict__:
                 cls._the_instance = dict.__new__(cls)
                 return cls._the_instance
 if "test_registry" not in locals():
-    print "Initializing test_registry"
     test_registry = TestRegistry()
 
 # The exceptions we raise, related to the character of the failure.
@@ -55,10 +53,10 @@
 
 class ArrayDelta(ValueDelta):
     def __repr__(self):
-        nabove = len(na.where(self.delta > self.acceptable)[0])
+        nabove = len(np.where(self.delta > self.acceptable)[0])
         return "ArrayDelta: Delta max of %s, acceptable of %s.\n" \
                "%d of %d points above the acceptable limit" % \
-               (na.nanmax(self.delta), self.acceptable, nabove,
+               (np.nanmax(self.delta), self.acceptable, nabove,
                 self.delta.size)
 
 class ShapeMismatch(RegressionTestException):
@@ -122,8 +120,8 @@
         """
         if a1.shape != a2.shape:
             raise ShapeMismatch(a1, a2)
-        delta = na.abs(a1 - a2).astype("float64")/(a1 + a2)
-        if na.nanmax(delta) > acceptable:
+        delta = np.abs(a1 - a2).astype("float64")/(a1 + a2)
+        if np.nanmax(delta) > acceptable:
             raise ArrayDelta(delta, acceptable)
         return True
 
@@ -134,7 +132,7 @@
         difference is greater than `acceptable` it is considered a failure and
         an appropriate exception is raised.
         """
-        delta = na.abs(v1 - v2)/(v1 + v2)
+        delta = np.abs(v1 - v2)/(v1 + v2)
         if delta > acceptable:
             raise ValueDelta(delta, acceptable)
         return True

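The array check above is a symmetric relative difference, delta = |a1 - a2| / (a1 + a2), with np.nanmax guarding against NaNs from empty or masked zones. A self-contained sketch of the same test:

    import numpy as np

    def within_tolerance(a1, a2, acceptable):
        # symmetric relative difference, as in the method above
        delta = np.abs(a1 - a2).astype("float64") / (a1 + a2)
        return np.nanmax(delta) <= acceptable

    print within_tolerance(np.array([1.0, 2.0]),
                           np.array([1.0, 2.02]), 1.0e-2)  # True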

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/answer_testing/particle_tests.py
--- a/yt/utilities/answer_testing/particle_tests.py
+++ b/yt/utilities/answer_testing/particle_tests.py
@@ -32,13 +32,13 @@
         # Tests to make sure particle positions aren't changing
         # drastically. This is very unlikely to be a problem.
         all = self.pf.h.all_data()
-        min = na.empty(3,dtype='float64')
+        min = np.empty(3,dtype='float64')
         max = min.copy()
         dims = ["particle_position_x","particle_position_y",
             "particle_position_z"]
         for i in xrange(3):
-            min[i] = na.min(all[dims[i]])
-            max[i] = na.max(all[dims[i]])
+            min[i] = np.min(all[dims[i]])
+            max[i] = np.max(all[dims[i]])
         self.result = (min,max)
     
     def compare(self, old_result):


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -152,7 +152,7 @@
                    help="Width in specified units"),
     unit    = dict(short="-u", long="--unit",
                    action="store", type=str,
-                   dest="unit", default='unitary',
+                   dest="unit", default='1',
                    help="Desired units"),
     center  = dict(short="-c", long="--center",
                    action="store", type=float,
@@ -1095,8 +1095,12 @@
                   )
         else:
             from IPython.config.loader import Config
+            import sys
             cfg = Config()
+            # prepend sys.path with current working directory
+            sys.path.insert(0,'')
             IPython.embed(config=cfg,user_ns=local_ns)
+            
 
 class YTMapserverCmd(YTCommand):
     args = ("proj", "field", "weight",
@@ -1188,6 +1192,40 @@
         import yt.utilities.lodgeit as lo
         lo.main( None, download=args.number )
 
+class YTNotebookUploadCmd(YTCommand):
+    args = (dict(short="file", type=str),)
+    description = \
+        """
+        Upload an IPython notebook to hub.yt-project.org.
+        """
+
+    name = "upload_notebook"
+    def __call__(self, args):
+        filename = args.file
+        if not os.path.isfile(filename):
+            raise IOError(filename)
+        if not filename.endswith(".ipynb"):
+            print "File must be an IPython notebook!"
+            return 1
+        import json
+        try:
+            t = json.loads(open(filename).read())['metadata']['name']
+        except (ValueError, KeyError):
+            print "File does not appear to be an IPython notebook."
+            return 1
+        from yt.utilities.minimal_representation import MinimalNotebook
+        mn = MinimalNotebook(filename, t)
+        rv = mn.upload()
+        print "Upload successful!"
+        print
+        print "To access your raw notebook go here:"
+        print
+        print "  %s" % (rv['url'])
+        print
+        print "To view your notebook go here:"
+        print
+        print "  %s" % (rv['url'].replace("/go/", "/nb/"))
+        print
+
 class YTPlotCmd(YTCommand):
     args = ("width", "unit", "bn", "proj", "center",
             "zlim", "axis", "field", "weight", "skip",
@@ -1212,7 +1250,7 @@
             v, center = pf.h.find_max("Density")
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
-        center = na.array(center)
+        center = np.array(center)
         if args.axis == 4:
             axes = range(3)
         else:
@@ -1266,12 +1304,12 @@
             v, center = pf.h.find_max("Density")
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
-        center = na.array(center)
+        center = np.array(center)
 
         L = args.viewpoint
         if L is None:
             L = [1.]*3
-        L = na.array(args.viewpoint)
+        L = np.array(args.viewpoint)
 
         unit = args.unit
         if unit is None:
@@ -1302,7 +1340,7 @@
             roi = pf.h.region(center, center-width, center+width)
             mi, ma = roi.quantities['Extrema'](field)[0]
             if log:
-                mi, ma = na.log10(mi), na.log10(ma)
+                mi, ma = np.log10(mi), np.log10(ma)
         else:
             mi, ma = myrange[0], myrange[1]
 

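The new upload_notebook command reads the notebook's name out of its JSON metadata before handing it to MinimalNotebook. A hedged sketch of that lookup; notebook_name() and "example.ipynb" are illustrative, not part of the yt API:

    import json
    import os

    def notebook_name(filename):
        # mirrors the validation the command performs: the file must
        # exist, end in .ipynb, and carry a 'name' key in its metadata
        if not os.path.isfile(filename) or not filename.endswith(".ipynb"):
            raise IOError(filename)
        return json.loads(open(filename).read())['metadata']['name']

    print notebook_name("example.ipynb")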

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 c_kms = 2.99792458e5 # c in km/s
 G = 6.67259e-8 # cgs
@@ -49,40 +49,40 @@
 
     def ComovingTransverseDistance(self,z_i,z_f):
          if (self.OmegaCurvatureNow > 0):
-             return (self.HubbleDistance() / na.sqrt(self.OmegaCurvatureNow) * 
-                     na.sinh(na.sqrt(self.OmegaCurvatureNow) * 
+             return (self.HubbleDistance() / np.sqrt(self.OmegaCurvatureNow) * 
+                     np.sinh(np.sqrt(self.OmegaCurvatureNow) * 
                           self.ComovingRadialDistance(z_i,z_f) / 
                           self.HubbleDistance()))
          elif (self.OmegaCurvatureNow < 0):
-             return (self.HubbleDistance() / na.sqrt(na.fabs(self.OmegaCurvatureNow)) * 
-                     sin(na.sqrt(na.fabs(self.OmegaCurvatureNow)) * 
+             return (self.HubbleDistance() / np.sqrt(np.fabs(self.OmegaCurvatureNow)) * 
+                     np.sin(np.sqrt(np.fabs(self.OmegaCurvatureNow)) * 
                          self.ComovingRadialDistance(z_i,z_f) / self.HubbleDistance()))
          else:
              return self.ComovingRadialDistance(z_i,z_f)
 
     def ComovingVolume(self,z_i,z_f):
         if (self.OmegaCurvatureNow > 0):
-             return (2 * na.pi * na.power(self.HubbleDistance(), 3) / self.OmegaCurvatureNow * 
+             return (2 * np.pi * np.power(self.HubbleDistance(), 3) / self.OmegaCurvatureNow * 
                      (self.ComovingTransverseDistance(z_i,z_f) / self.HubbleDistance() * 
-                      na.sqrt(1 + self.OmegaCurvatureNow * 
+                      np.sqrt(1 + self.OmegaCurvatureNow * 
                            sqr(self.ComovingTransverseDistance(z_i,z_f) / 
                                self.HubbleDistance())) - 
-                      ana.sinh(na.fabs(self.OmegaCurvatureNow) * 
+                      np.arcsinh(np.fabs(self.OmegaCurvatureNow) * 
                             self.ComovingTransverseDistance(z_i,z_f) / 
-                            self.HubbleDistance()) / na.sqrt(self.OmegaCurvatureNow)) / 1e9)
+                            self.HubbleDistance()) / np.sqrt(self.OmegaCurvatureNow)) / 1e9)
         elif (self.OmegaCurvatureNow < 0):
-             return (2 * na.pi * na.power(self.HubbleDistance(), 3) / 
-                     na.fabs(self.OmegaCurvatureNow) * 
+             return (2 * np.pi * np.power(self.HubbleDistance(), 3) / 
+                     np.fabs(self.OmegaCurvatureNow) * 
                      (self.ComovingTransverseDistance(z_i,z_f) / self.HubbleDistance() * 
-                      na.sqrt(1 + self.OmegaCurvatureNow * 
+                      np.sqrt(1 + self.OmegaCurvatureNow * 
                            sqr(self.ComovingTransverseDistance(z_i,z_f) / 
                                self.HubbleDistance())) - 
-                      asin(na.fabs(self.OmegaCurvatureNow) * 
+                      np.arcsin(np.fabs(self.OmegaCurvatureNow) * 
                            self.ComovingTransverseDistance(z_i,z_f) / 
                            self.HubbleDistance()) / 
-                      na.sqrt(na.fabs(self.OmegaCurvatureNow))) / 1e9)
+                      np.sqrt(np.fabs(self.OmegaCurvatureNow))) / 1e9)
         else:
-             return (4 * na.pi * na.power(self.ComovingTransverseDistance(z_i,z_f), 3) / 
+             return (4 * np.pi * np.power(self.ComovingTransverseDistance(z_i,z_f), 3) / 
                      3 / 1e9)
 
     def AngularDiameterDistance(self,z_i,z_f):
@@ -100,18 +100,18 @@
         return (romberg(self.AgeIntegrand,z,1000) / self.HubbleConstantNow * kmPerMpc)
 
     def AngularScale_1arcsec_kpc(self,z_i,z_f):
-        return (self.AngularDiameterDistance(z_i,z_f) / 648. * na.pi)
+        return (self.AngularDiameterDistance(z_i,z_f) / 648. * np.pi)
 
     def CriticalDensity(self,z):
-        return (3.0 / 8.0 / na.pi * sqr(self.HubbleConstantNow / kmPerMpc) / G *
+        return (3.0 / 8.0 / np.pi * sqr(self.HubbleConstantNow / kmPerMpc) / G *
                 (self.OmegaLambdaNow + ((1 + z)**3.0) * self.OmegaMatterNow))
 
     def AgeIntegrand(self,z):
         return (1 / (z + 1) / self.ExpansionFactor(z))
 
     def ExpansionFactor(self,z):
-        return na.sqrt(self.OmegaMatterNow * ((1 + z)**3.0) + 
-                    self.OmegaCurvatureNow * na.sqrt(1 + z) + 
+        return np.sqrt(self.OmegaMatterNow * ((1 + z)**3.0) + 
+                    self.OmegaCurvatureNow * np.sqrt(1 + z) + 
                     self.OmegaLambdaNow)
 
     def InverseExpansionFactor(self,z):
@@ -162,8 +162,8 @@
         """
         # Changed 2.52e17 to 2.52e19 because H_0 is in km/s/Mpc, 
         # instead of 100 km/s/Mpc.
-        return 2.52e19 / na.sqrt(self.OmegaMatterNow) / \
-            self.HubbleConstantNow / na.power(1 + self.InitialRedshift,1.5)
+        return 2.52e19 / np.sqrt(self.OmegaMatterNow) / \
+            self.HubbleConstantNow / np.power(1 + self.InitialRedshift,1.5)
 
     def ComputeRedshiftFromTime(self,time):
         """
@@ -183,18 +183,18 @@
  
         # 1) For a flat universe with OmegaMatterNow = 1, it's easy.
  
-        if ((na.fabs(self.OmegaMatterNow-1) < OMEGA_TOLERANCE) and
+        if ((np.fabs(self.OmegaMatterNow-1) < OMEGA_TOLERANCE) and
             (self.OmegaLambdaNow < OMEGA_TOLERANCE)):
-            a = na.power(time/self.InitialTime,2.0/3.0)
+            a = np.power(time/self.InitialTime,2.0/3.0)
  
         # 2) For OmegaMatterNow < 1 and OmegaLambdaNow == 0 see
         #    Peebles 1993, eq. 13-3, 13-10.
         #    Actually, this is a little tricky since we must solve an equation
-        #    of the form eta - na.sinh(eta) + x = 0..
+        #    of the form eta - np.sinh(eta) + x = 0..
  
         if ((self.OmegaMatterNow < 1) and 
             (self.OmegaLambdaNow < OMEGA_TOLERANCE)):
-            x = 2*TimeHubble0*na.power(1.0 - self.OmegaMatterNow, 1.5) / \
+            x = 2*TimeHubble0*np.power(1.0 - self.OmegaMatterNow, 1.5) / \
                 self.OmegaMatterNow;
  
             # Compute eta in a three step process, first from a third-order
@@ -203,12 +203,12 @@
             # eta.  This works well because parts 1 & 2 are an excellent approximation
             # when x is small and part 3 converges quickly when x is large. 
  
-            eta = na.power(6*x,1.0/3.0)                # part 1
-            eta = na.power(120*x/(20+eta*eta),1.0/3.0) # part 2
+            eta = np.power(6*x,1.0/3.0)                # part 1
+            eta = np.power(120*x/(20+eta*eta),1.0/3.0) # part 2
             for i in range(40):                      # part 3
                 eta_old = eta
-                eta = na.arcsinh(eta + x)
-                if (na.fabs(eta-eta_old) < ETA_TOLERANCE): 
+                eta = np.arcsinh(eta + x)
+                if (np.fabs(eta-eta_old) < ETA_TOLERANCE): 
                     break
                 if (i == 39):
                     print "No convergence after %d iterations." % i
@@ -216,7 +216,7 @@
             # Now use eta to compute the expansion factor (eq. 13-10, part 2).
  
             a = self.OmegaMatterNow/(2.0*(1.0 - self.OmegaMatterNow))*\
-                (na.cosh(eta) - 1.0)
+                (np.cosh(eta) - 1.0)
 
         # 3) For OmegaMatterNow > 1 and OmegaLambdaNow == 0, use sin/cos.
         #    Easy, but skip it for now.
@@ -228,10 +228,10 @@
  
         # 4) For flat universe, with non-zero OmegaLambdaNow, see eq. 13-20.
  
-        if ((na.fabs(OmegaCurvatureNow) < OMEGA_TOLERANCE) and
+        if ((np.fabs(OmegaCurvatureNow) < OMEGA_TOLERANCE) and
             (self.OmegaLambdaNow > OMEGA_TOLERANCE)):
-            a = na.power(self.OmegaMatterNow / (1 - self.OmegaMatterNow),1.0/3.0) * \
-                na.power(na.sinh(1.5 * na.sqrt(1.0 - self.OmegaMatterNow)*\
+            a = np.power(self.OmegaMatterNow / (1 - self.OmegaMatterNow),1.0/3.0) * \
+                np.power(np.sinh(1.5 * np.sqrt(1.0 - self.OmegaMatterNow)*\
                                      TimeHubble0),2.0/3.0)
 
 
@@ -249,29 +249,29 @@
         # 1) For a flat universe with OmegaMatterNow = 1, things are easy.
  
         if ((self.OmegaMatterNow == 1.0) and (self.OmegaLambdaNow == 0.0)):
-            TimeHubble0 = 2.0/3.0/na.power(1+z,1.5)
+            TimeHubble0 = 2.0/3.0/np.power(1+z,1.5)
  
         # 2) For OmegaMatterNow < 1 and OmegaLambdaNow == 0 see
         #    Peebles 1993, eq. 13-3, 13-10.
  
         if ((self.OmegaMatterNow < 1) and (self.OmegaLambdaNow == 0)):
-            eta = na.arccosh(1 + 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
-            TimeHubble0 = self.OmegaMatterNow/(2*na.power(1.0-self.OmegaMatterNow, 1.5))*\
-                (na.sinh(eta) - eta)
+            eta = np.arccosh(1 + 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
+            TimeHubble0 = self.OmegaMatterNow/(2*np.power(1.0-self.OmegaMatterNow, 1.5))*\
+                (np.sinh(eta) - eta)
  
         # 3) For OmegaMatterNow > 1 and OmegaLambdaNow == 0, use sin/cos.
  
         if ((self.OmegaMatterNow > 1) and (self.OmegaLambdaNow == 0)):
-            eta = na.acos(1 - 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
-            TimeHubble0 = self.OmegaMatterNow/(2*na.power(1.0-self.OmegaMatterNow, 1.5))*\
-                (eta - na.sin(eta))
+            eta = np.arccos(1 - 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
+            TimeHubble0 = self.OmegaMatterNow/(2*np.power(1.0-self.OmegaMatterNow, 1.5))*\
+                (eta - np.sin(eta))
  
         # 4) For flat universe, with non-zero OmegaLambdaNow, see eq. 13-20.
  
-        if ((na.fabs(OmegaCurvatureNow) < 1.0e-3) and (self.OmegaLambdaNow != 0)):
-            TimeHubble0 = 2.0/3.0/na.sqrt(1-self.OmegaMatterNow)*\
-                na.arcsinh(na.sqrt((1-self.OmegaMatterNow)/self.OmegaMatterNow)/ \
-                               na.power(1+z,1.5))
+        if ((np.fabs(OmegaCurvatureNow) < 1.0e-3) and (self.OmegaLambdaNow != 0)):
+            TimeHubble0 = 2.0/3.0/np.sqrt(1-self.OmegaMatterNow)*\
+                np.arcsinh(np.sqrt((1-self.OmegaMatterNow)/self.OmegaMatterNow)/ \
+                               np.power(1+z,1.5))
   
         # Now convert from Time * H0 to time.
   

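For the flat, Lambda-dominated branch the age integral has the closed form used above (Peebles eq. 13-20): t*H0 = 2/(3*sqrt(1-Om)) * arcsinh(sqrt((1-Om)/Om) / (1+z)**1.5). A standalone sketch of that branch alone, assuming Om = 0.3 and H0 = 70 km/s/Mpc:

    import numpy as np

    km_per_mpc = 3.0857e19  # km in one megaparsec

    def age_flat_lcdm(z, omega_m=0.3, h0=70.0):
        # t(z) in seconds for a flat universe with OmegaLambda = 1 - OmegaMatter
        t_h0 = 2.0 / 3.0 / np.sqrt(1.0 - omega_m) * \
            np.arcsinh(np.sqrt((1.0 - omega_m) / omega_m) /
                       np.power(1.0 + z, 1.5))
        return t_h0 * km_per_mpc / h0

    print age_flat_lcdm(0.0) / 3.156e16  # ~13.5 Gyr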

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/decompose.py
--- /dev/null
+++ b/yt/utilities/decompose.py
@@ -0,0 +1,151 @@
+"""
+Automagical cartesian domain decomposition.
+
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Author: Artur Gawryszczak <gawrysz at gmail.com>
+Affiliation: PCSS
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+SIEVE_PRIMES = \
+    lambda l: l and l[:1] + SIEVE_PRIMES([n for n in l if n % l[0]])
+
+
+def decompose_to_primes(max_prime):
+    """ Decompose number into the primes """
+    for prime in SIEVE_PRIMES(range(2, max_prime)):
+        if prime * prime > max_prime:
+            break
+        while max_prime % prime == 0:
+            yield prime
+            max_prime /= prime
+    if max_prime > 1:
+        yield max_prime
+
+
+def decompose_array(arr, psize, bbox):
+    """ Calculate list of product(psize) subarrays of arr, along with their
+        left and right edges
+    """
+    grid_left_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    grid_right_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    n_d = arr.shape
+    d_s = (bbox[:, 1] - bbox[:, 0]) / n_d
+    dist = np.mgrid[bbox[0, 0]:bbox[0, 1]:d_s[0],
+                    bbox[1, 0]:bbox[1, 1]:d_s[1],
+                    bbox[2, 0]:bbox[2, 1]:d_s[2]]
+    for i in range(3):
+        xyz = split_array(dist[i], psize)
+        for j in range(np.product(psize)):
+            grid_left_edges[j, i] = xyz[j][0, 0, 0]
+            grid_right_edges[j, i] = xyz[j][-1, -1, -1] + d_s[i]
+        del xyz
+    del dist
+    patches = split_array(arr, psize)
+    return grid_left_edges, grid_right_edges, patches
+
+
+def evaluate_domain_decomposition(n_d, pieces, ldom):
+    """ Evaluate longest to shortest edge ratio
+        BEWARE: lots of magic here """
+    eff_dim = (n_d > 1).sum()
+    ideal_bsize = eff_dim * (pieces * np.product(n_d) ** (eff_dim - 1)
+                             ) ** (1.0 / eff_dim)
+    mask = np.where(n_d > 1)
+    nd_arr = np.array(n_d, dtype=np.float64)[mask]
+    bsize = int(np.sum(ldom[mask] / nd_arr * np.product(nd_arr)))
+    load_balance = float(np.product(n_d)) / \
+        (float(pieces) * np.product((n_d - 1) / ldom + 1))
+
+    # 0.25 is magic number
+    quality = load_balance / (1 + 0.25 * (bsize / ideal_bsize - 1.0))
+    # \todo add a factor that estimates lower cost when x-direction is
+    # not chopped too much
+    # \deprecated estimate these magic numbers
+    quality *= (1. - (0.001 * ldom[0] + 0.0001 * ldom[1]) / pieces)
+    if np.any(ldom > n_d):
+        quality = 0
+
+    return quality
+
+
+def factorize_number(pieces):
+    """ Return array consisting of prime, its power and number of different
+        decompositions in three dimensions for this prime
+    """
+    factors = [factor for factor in decompose_to_primes(pieces)]
+    temp = np.bincount(factors)
+    return np.array(
+        [(prime, temp[prime], (temp[prime] + 1) * (temp[prime] + 2) / 2)
+         for prime in np.unique(factors)]
+    )
+
+
+def get_psize(n_d, pieces):
+    """ Calculate the best division of array into px*py*pz subarrays.
+        The goal is to minimize the ratio of longest to shortest edge
+        to minimize the amount of inter-process communication.
+    """
+    fac = factorize_number(pieces)
+    nfactors = len(fac[:, 2])
+    best = 0.0
+    while np.all(fac[:, 2] > 0):
+        ldom = np.ones(3, dtype=np.int)
+        for nfac in range(nfactors):
+            i = int(np.sqrt(0.25 + 2 * (fac[nfac, 2] - 1)) - 0.5)
+            k = fac[nfac, 2] - int(1 + i * (i + 1) / 2)
+            i = fac[nfac, 1] - i
+            j = fac[nfac, 1] - (i + k)
+            ldom *= fac[nfac, 0] ** np.array([i, j, k])
+
+        quality = evaluate_domain_decomposition(n_d, pieces, ldom)
+        if quality > best:
+            best = quality
+            p_size = ldom
+        # search for next unique combination
+        for j in range(nfactors):
+            if fac[j, 2] > 1:
+                fac[j, 2] -= 1
+                break
+            else:
+                if (j < nfactors - 1):
+                    fac[j, 2] = int((fac[j, 1] + 1) * (fac[j, 1] + 2) / 2)
+                else:
+                    fac[:, 2] = 0  # no more combinations to try
+
+    return p_size
+
+
+def split_array(tab, psize):
+    """ Split array into px*py*pz subarrays. """
+    n_d = np.array(tab.shape, dtype=np.int64)
+    slices = []
+    for i in range(psize[0]):
+        for j in range(psize[1]):
+            for k in range(psize[2]):
+                piece = np.array((i, j, k), dtype=np.int64)
+                lei = n_d * piece / psize
+                rei = n_d * (piece + np.ones(3, dtype=np.int64)) / psize
+                slices.append(np.s_[lei[0]:rei[0], lei[1]:
+                                    rei[1], lei[2]:rei[2]])
+    return [tab[slc] for slc in slices]


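Taken together, get_psize() picks the px*py*pz factorization and decompose_array() cuts the array into that many patches along with their edges. A usage sketch, assuming the module is importable as yt.utilities.decompose:

    import numpy as np
    from yt.utilities.decompose import get_psize, decompose_array

    arr = np.random.random((64, 64, 64))
    bbox = np.array([[0., 1.], [0., 1.], [0., 1.]])
    psize = get_psize(np.array(arr.shape), 12)      # e.g. [3 2 2]
    les, res, patches = decompose_array(arr, psize, bbox)
    print len(patches), patches[0].shape            # 12 patches
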
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -136,3 +136,23 @@
 class YTCoordinateNotImplemented(YTException):
     def __str__(self):
         return "This coordinate is not implemented for this geometry type."
+
+class YTUnitNotRecognized(YTException):
+    def __init__(self, unit):
+        self.unit = unit
+
+    def __str__(self):
+        return "This parameter file doesn't recognize %s" % self.unit
+
+class YTHubRegisterError(YTException):
+    def __str__(self):
+        return "You must create an API key before uploading.  See " + \
+               "https://data.yt-project.org/getting_started.html"
+
+class YTNoOldAnswer(YTException):
+    def __init__(self, path):
+        self.path = path
+
+    def __str__(self):
+        return "There is no old answer available.\n" + \
+               str(self.path)


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/flagging_methods.py
--- /dev/null
+++ b/yt/utilities/flagging_methods.py
@@ -0,0 +1,51 @@
+"""
+Utilities for flagging zones for refinement in a dataset
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np # For modern purposes
+
+flagging_method_registry = {}
+
+def flag_cells(grid, methods):
+    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+    for method in methods:
+        flagged |= method(grid)
+    return flagged
+
+class FlaggingMethod(object):
+    _skip_add = False
+    class __metaclass__(type):
+        def __init__(cls, name, b, d):
+            type.__init__(cls, name, b, d)
+            if hasattr(cls, "_type_name") and not cls._skip_add:
+                flagging_method_registry[cls._type_name] = cls
+
+class OverDensity(FlaggingMethod):
+    _type_name = "overdensity"
+    def __init__(self, over_density):
+        self.over_density = over_density
+
+    def __call__(self, grid):
+        rho = grid["Density"] / (grid.pf.refine_by**grid.Level)
+        return (rho > self.over_density)

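The metaclass above registers any subclass that defines _type_name, so new flagging criteria become available by name without touching a central table. An illustrative subclass (MinimumDensity is hypothetical, not part of this commit):

    from yt.utilities.flagging_methods import FlaggingMethod, \
        flagging_method_registry

    class MinimumDensity(FlaggingMethod):
        _type_name = "minimum_density"
        def __init__(self, min_density):
            self.min_density = min_density
        def __call__(self, grid):
            return grid["Density"] > self.min_density

    print "minimum_density" in flagging_method_registry  # True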

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/fortran_utils.py
--- a/yt/utilities/fortran_utils.py
+++ b/yt/utilities/fortran_utils.py
@@ -24,7 +24,7 @@
 """
 
 import struct
-import numpy as na
+import numpy as np
 import os
 
 def read_attrs(f, attrs):
@@ -106,7 +106,7 @@
         print "fmt = '%s' ; ss = %s ; ds = %s" % (fmt, ss, ds)
         raise RuntimeError
     count = ss / ds
-    tr = na.fromstring(f.read(na.dtype(d).itemsize*count), d, count)
+    tr = np.fromstring(f.read(np.dtype(d).itemsize*count), d, count)
     vec = struct.unpack(fmt, f.read(struct.calcsize(fmt)))
     assert(vec[-1] == ss)
     return tr


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/grid_data_format/conversion/conversion_athena.py
--- a/yt/utilities/grid_data_format/conversion/conversion_athena.py
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -1,6 +1,6 @@
 import os
 import weakref
-import numpy as na
+import numpy as np
 import h5py as h5
 from conversion_abc import *
 from glob import glob
@@ -55,11 +55,11 @@
             grid['domain'] = int(splitup[8].rstrip(','))
             self.current_time = grid['time']
         elif "DIMENSIONS" in splitup:
-            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+            grid['dimensions'] = np.array(splitup[-3:]).astype('int')
         elif "ORIGIN" in splitup:
-            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+            grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
         elif "SPACING" in splitup:
-            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+            grid['dds'] = np.array(splitup[-3:]).astype('float64')
         elif "CELL_DATA" in splitup:
             grid["ncells"] = int(splitup[-1])
         elif "SCALARS" in splitup:
@@ -94,12 +94,12 @@
         proc_names = glob(self.source_dir+'id*')
         #print 'Reading a dataset from %i Processor Files' % len(proc_names)
         N = len(proc_names)
-        grid_dims = na.empty([N,3],dtype='int64')
-        grid_left_edges = na.empty([N,3],dtype='float64')
-        grid_dds = na.empty([N,3],dtype='float64')
-        grid_levels = na.zeros(N,dtype='int64')
-        grid_parent_ids = -1*na.ones(N,dtype='int64')
-        grid_particle_counts = na.zeros([N,1],dtype='int64')
+        grid_dims = np.empty([N,3],dtype='int64')
+        grid_left_edges = np.empty([N,3],dtype='float64')
+        grid_dds = np.empty([N,3],dtype='float64')
+        grid_levels = np.zeros(N,dtype='int64')
+        grid_parent_ids = -1*np.ones(N,dtype='int64')
+        grid_particle_counts = np.zeros([N,1],dtype='int64')
 
         for i in range(N):
             if i == 0:
@@ -128,12 +128,12 @@
 
             if len(line) == 0: break
             
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 grid['dimensions'] -= 1
                 grid['dimensions'][grid['dimensions']==0]=1
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 print 'product of dimensions %i not equal to number of cells %i' % \
-                      (na.prod(grid['dimensions']), grid['ncells'])
+                      (np.prod(grid['dimensions']), grid['ncells'])
                 raise TypeError
 
             # Append all hierarchy info before reading this grid's data
@@ -149,7 +149,7 @@
 
         ## --------- Begin level nodes --------- ##
         g = f.create_group('gridded_data_format')
-        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['format_version']=np.float32(1.0)
         g.attrs['data_software']='athena'
         data_g = f.create_group('data')
         field_g = f.create_group('field_types')
@@ -159,8 +159,8 @@
 
         gles = grid_left_edges
         gdims = grid_dims
-        dle = na.min(gles,axis=0)
-        dre = na.max(gles+grid_dims*grid_dds,axis=0)
+        dle = np.min(gles,axis=0)
+        dre = np.max(gles+grid_dims*grid_dds,axis=0)
         glis = ((gles - dle)/grid_dds).astype('int64')
         gris = glis + gdims
 
@@ -183,17 +183,17 @@
 
         ## --------- Done with top level nodes --------- ##
 
-        pars_g.attrs['refine_by'] = na.int64(1)
-        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['refine_by'] = np.int64(1)
+        pars_g.attrs['dimensionality'] = np.int64(3)
         pars_g.attrs['domain_dimensions'] = ddims
         pars_g.attrs['current_time'] = self.current_time
         pars_g.attrs['domain_left_edge'] = dle
         pars_g.attrs['domain_right_edge'] = dre
         pars_g.attrs['unique_identifier'] = 'athenatest'
-        pars_g.attrs['cosmological_simulation'] = na.int64(0)
-        pars_g.attrs['num_ghost_zones'] = na.int64(0)
-        pars_g.attrs['field_ordering'] = na.int64(1)
-        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+        pars_g.attrs['cosmological_simulation'] = np.int64(0)
+        pars_g.attrs['num_ghost_zones'] = np.int64(0)
+        pars_g.attrs['field_ordering'] = np.int64(1)
+        pars_g.attrs['boundary_conditions'] = np.int64([0]*6) # For Now
 
         # Extra pars:
         # pars_g.attrs['n_cells'] = grid['ncells']
@@ -224,18 +224,18 @@
                 splitup = line.strip().split()
 
                 if "DIMENSIONS" in splitup:
-                    grid_dims = na.array(splitup[-3:]).astype('int')
+                    grid_dims = np.array(splitup[-3:]).astype('int')
                     line = f.readline()
                     continue
                 elif "CELL_DATA" in splitup:
                     grid_ncells = int(splitup[-1])
                     line = f.readline()
-                    if na.prod(grid_dims) != grid_ncells:
+                    if np.prod(grid_dims) != grid_ncells:
                         grid_dims -= 1
                         grid_dims[grid_dims==0]=1
-                    if na.prod(grid_dims) != grid_ncells:
+                    if np.prod(grid_dims) != grid_ncells:
                         print 'product of dimensions %i not equal to number of cells %i' % \
-                              (na.prod(grid_dims), grid_ncells)
+                              (np.prod(grid_dims), grid_ncells)
                         raise TypeError
                     break
                 else:
@@ -250,7 +250,7 @@
                     if not read_table:
                         line = f.readline() # Read the lookup table line
                         read_table = True
-                    data = na.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F')
+                    data = np.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F')
                     if i == 0:
                         self.fields.append(field)
                     # print 'writing field %s' % field
@@ -259,7 +259,7 @@
 
                 elif 'VECTORS' in splitup:
                     field = splitup[1]
-                    data = na.fromfile(f, dtype='>f4', count=3*grid_ncells)
+                    data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
                     data_x = data[0::3].reshape(grid_dims,order='F')
                     data_y = data[1::3].reshape(grid_dims,order='F')
                     data_z = data[2::3].reshape(grid_dims,order='F')
@@ -291,7 +291,7 @@
             if name in self.field_conversions.keys():
                 this_field.attrs['field_to_cgs'] = self.field_conversions[name]
             else:
-                this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+                this_field.attrs['field_to_cgs'] = np.float64('1.0') # For Now
             
 
     def convert(self, hierarchy=True, data=True):
@@ -327,11 +327,11 @@
         elif "Really" in splitup:
             grid['time'] = splitup[-1]
         elif "DIMENSIONS" in splitup:
-            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+            grid['dimensions'] = np.array(splitup[-3:]).astype('int')
         elif "ORIGIN" in splitup:
-            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+            grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
         elif "SPACING" in splitup:
-            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+            grid['dds'] = np.array(splitup[-3:]).astype('float64')
         elif "CELL_DATA" in splitup:
             grid["ncells"] = int(splitup[-1])
         elif "SCALARS" in splitup:
@@ -365,19 +365,19 @@
             #    print line
 
             if len(line) == 0: break
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 grid['dimensions'] -= 1
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 print 'product of dimensions %i not equal to number of cells %i' % \
-                      (na.prod(grid['dimensions']), grid['ncells'])
+                      (np.prod(grid['dimensions']), grid['ncells'])
                 raise TypeError
 
             if grid['read_type'] is 'scalar':
                 grid[grid['read_field']] = \
-                    na.fromfile(f, dtype='>f4', count=grid['ncells']).reshape(grid['dimensions'],order='F')
+                    np.fromfile(f, dtype='>f4', count=grid['ncells']).reshape(grid['dimensions'],order='F')
                 self.fields.append(grid['read_field'])
             elif grid['read_type'] is 'vector':
-                data = na.fromfile(f, dtype='>f4', count=3*grid['ncells'])
+                data = np.fromfile(f, dtype='>f4', count=3*grid['ncells'])
                 grid[grid['read_field']+'_x'] = data[0::3].reshape(grid['dimensions'],order='F')
                 grid[grid['read_field']+'_y'] = data[1::3].reshape(grid['dimensions'],order='F')
                 grid[grid['read_field']+'_z'] = data[2::3].reshape(grid['dimensions'],order='F')
@@ -398,7 +398,7 @@
 
         ## --------- Begin level nodes --------- ##
         g = f.create_group('gridded_data_format')
-        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['format_version']=np.float32(1.0)
         g.attrs['data_software']='athena'
         data_g = f.create_group('data')
         field_g = f.create_group('field_types')
@@ -406,8 +406,8 @@
         pars_g = f.create_group('simulation_parameters')
 
         dle = grid['left_edge'] # True only in this case of one grid for the domain
-        gles = na.array([grid['left_edge']])
-        gdims = na.array([grid['dimensions']])
+        gles = np.array([grid['left_edge']])
+        gdims = np.array([grid['dimensions']])
         glis = ((gles - dle)/grid['dds']).astype('int64')
         gris = glis + gdims
 
@@ -416,18 +416,18 @@
         # grid_dimensions
         gdim = f.create_dataset('grid_dimensions',data=gdims)
 
-        levels = na.array([0]).astype('int64') # unigrid example
+        levels = np.array([0]).astype('int64') # unigrid example
         # grid_level
         level = f.create_dataset('grid_level',data=levels)
 
         ## ----------QUESTIONABLE NEXT LINE--------- ##
         # This data needs two dimensions for now. 
-        n_particles = na.array([[0]]).astype('int64')
+        n_particles = np.array([[0]]).astype('int64')
         #grid_particle_count
         part_count = f.create_dataset('grid_particle_count',data=n_particles)
 
         # Assume -1 means no parent.
-        parent_ids = na.array([-1]).astype('int64')
+        parent_ids = np.array([-1]).astype('int64')
         # grid_parent_id
         pids = f.create_dataset('grid_parent_id',data=parent_ids)
 
@@ -451,8 +451,8 @@
 
         ## --------- Attribute Tables --------- ##
 
-        pars_g.attrs['refine_by'] = na.int64(1)
-        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['refine_by'] = np.int64(1)
+        pars_g.attrs['dimensionality'] = np.int64(3)
         pars_g.attrs['domain_dimensions'] = grid['dimensions']
         try:
             pars_g.attrs['current_time'] = grid['time']
@@ -461,10 +461,10 @@
         pars_g.attrs['domain_left_edge'] = grid['left_edge'] # For Now
         pars_g.attrs['domain_right_edge'] = grid['right_edge'] # For Now
         pars_g.attrs['unique_identifier'] = 'athenatest'
-        pars_g.attrs['cosmological_simulation'] = na.int64(0)
-        pars_g.attrs['num_ghost_zones'] = na.int64(0)
-        pars_g.attrs['field_ordering'] = na.int64(0)
-        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+        pars_g.attrs['cosmological_simulation'] = np.int64(0)
+        pars_g.attrs['num_ghost_zones'] = np.int64(0)
+        pars_g.attrs['field_ordering'] = np.int64(0)
+        pars_g.attrs['boundary_conditions'] = np.int64([0]*6) # For Now
 
         # Extra pars:
         pars_g.attrs['n_cells'] = grid['ncells']
@@ -481,7 +481,7 @@
         if name in self.field_conversions.keys():
             this_field.attrs['field_to_cgs'] = self.field_conversions[name]
         else:
-            this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+            this_field.attrs['field_to_cgs'] = np.float64('1.0') # For Now
 
         # Add particle types
         # Nothing to do here


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/grid_data_format/writer.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/writer.py
@@ -0,0 +1,171 @@
+"""
+Writing yt data to a GDF file.
+
+Authors: Casey W. Stark <caseywstark at gmail.com>
+Affiliation: UC Berkeley
+
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Casey W. Stark.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+import os
+
+import h5py
+import numpy as np
+
+from yt import __version__ as yt_version
+
+
+def write_to_gdf(pf, gdf_path, data_author=None, data_comment=None,
+                 particle_type_name="dark_matter"):
+    """
+    Write a parameter file to the given path in the Grid Data Format.
+
+    Parameters
+    ----------
+    pf : StaticOutput object
+        The yt data to write out.
+    gdf_path : string
+        The path of the file to output.
+
+    """
+    # Make sure we have the absolute path to the file first
+    gdf_path = os.path.abspath(gdf_path)
+
+    # Stupid check -- is the file already there?
+    # @todo: make this a specific exception/error.
+    if os.path.exists(gdf_path):
+        raise IOError("A file already exists in the location: %s. Please provide a new one or remove that file." % gdf_path)
+
+    ###
+    # Create and open the file with h5py
+    ###
+    f = h5py.File(gdf_path, "w")
+
+    ###
+    # "gridded_data_format" group
+    ###
+    g = f.create_group("gridded_data_format")
+    g.attrs["data_software"] = "yt"
+    g.attrs["data_software_version"] = yt_version
+    if data_author is not None:
+        g.attrs["data_author"] = data_author
+    if data_comment is not None:
+        g.attrs["data_comment"] = data_comment
+
+    ###
+    # "simulation_parameters" group
+    ###
+    g = f.create_group("simulation_parameters")
+    g.attrs["refine_by"] = pf.refine_by
+    g.attrs["dimensionality"] = pf.dimensionality
+    g.attrs["domain_dimensions"] = pf.domain_dimensions
+    g.attrs["current_time"] = pf.current_time
+    g.attrs["domain_left_edge"] = pf.domain_left_edge
+    g.attrs["domain_right_edge"] = pf.domain_right_edge
+    g.attrs["unique_identifier"] = pf.unique_identifier
+    g.attrs["cosmological_simulation"] = pf.cosmological_simulation
+    # @todo: Where is this in the yt API?
+    g.attrs["num_ghost_zones"] = 0
+    # @todo: Where is this in the yt API?
+    g.attrs["field_ordering"] = 0
+    # @todo: not yet supported by yt.
+    g.attrs["boundary_conditions"] = np.array([0, 0, 0, 0, 0, 0], 'int32')
+
+    if pf.cosmological_simulation:
+        g.attrs["current_redshift"] = pf.current_redshift
+        g.attrs["omega_matter"] = pf.omega_matter
+        g.attrs["omega_lambda"] = pf.omega_lambda
+        g.attrs["hubble_constant"] = pf.hubble_constant
+
+    ###
+    # "field_types" group
+    ###
+    g = f.create_group("field_types")
+
+    # Which field list should we iterate over?
+    for field_name in pf.h.field_list:
+        # create the subgroup with the field's name
+        sg = g.create_group(field_name)
+
+        # grab the display name and units from the field info container.
+        display_name = pf.field_info[field_name].display_name
+        units = pf.field_info[field_name].get_units()
+
+        # check that they actually contain something...
+        if display_name:
+            sg.attrs["field_name"] = display_name
+        else:
+            sg.attrs["field_name"] = field_name
+        if units:
+            sg.attrs["field_units"] = units
+        else:
+            sg.attrs["field_units"] = "None"
+        # @todo: the values must be in CGS already right?
+        sg.attrs["field_to_cgs"] = 1.0
+        # @todo: is this always true?
+        sg.attrs["staggering"] = 0
+
+    ###
+    # "particle_types" group
+    ###
+    g = f.create_group("particle_types")
+
+    # @todo: Particle type iterator
+    sg = g.create_group(particle_type_name)
+    sg["particle_type_name"] = particle_type_name
+
+    ###
+    # root datasets -- info about the grids
+    ###
+    f["grid_dimensions"] = pf.h.grid_dimensions
+    f["grid_left_index"] = np.array(
+            [g.get_global_startindex() for g in pf.h.grids]
+    ).reshape(pf.h.grid_dimensions.shape[0], 3)
+    f["grid_level"] = pf.h.grid_levels
+    # @todo: Fill with proper values
+    f["grid_parent_id"] = -np.ones(pf.h.grid_dimensions.shape[0])
+    f["grid_particle_count"] = pf.h.grid_particle_count
+
+    ###
+    # "data" group -- where we should spend the most time
+    ###
+    g = f.create_group("data")
+
+    for grid in pf.h.grids:
+        # add group for this grid
+
+        grid_group = g.create_group("grid_%010i" % grid.id)
+        # add group for the particles on this grid
+        particles_group = grid_group.create_group("particles")
+        pt_group = particles_group.create_group(particle_type_name)
+
+        # add the field data to the grid group
+        for field_name in pf.h.field_list:
+            # Check if this is a real field or particle data.
+            field_obj = pf.field_info[field_name]
+
+            if field_obj.particle_type:  # particle data
+                pt_group[field_name] = grid.get_data(field_name)
+            else:  # a field
+                grid_group[field_name] = grid.get_data(field_name)
+
+    # don't forget to close the file.
+    f.close()


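A minimal usage sketch for the writer; "data0001" is a placeholder dataset path and the keyword values are illustrative:

    from yt.mods import load
    from yt.utilities.grid_data_format.writer import write_to_gdf

    pf = load("data0001")
    write_to_gdf(pf, "data0001.gdf", data_author="A. Nonymous",
                 data_comment="converted to GDF")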


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/kdtree/__init__.py
--- a/yt/utilities/kdtree/__init__.py
+++ b/yt/utilities/kdtree/__init__.py
@@ -1,1 +0,0 @@
-from fKDpy import *


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/kdtree/api.py
--- /dev/null
+++ b/yt/utilities/kdtree/api.py
@@ -0,0 +1,9 @@
+from fKDpy import \
+    chainHOP_tags_dens, \
+    create_tree, \
+    fKD, \
+    find_nn_nearest_neighbors, \
+    free_tree, \
+    find_chunk_nearest_neighbors
+
+


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/kdtree/test.py
--- a/yt/utilities/kdtree/test.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from Forthon import *
-from fKDpy import *
-import numpy,random
-
-n = 32768
-
-
-fKD.tags = fzeros((64),'i')
-fKD.dist = fzeros((64),'d')
-fKD.pos = fzeros((3,n),'d')
-fKD.nn = 64
-fKD.nparts = n
-fKD.sort = True
-fKD.rearrange = True
-fKD.qv = numpy.array([16./32, 16./32, 16./32])
-
-fp = open('parts.txt','r')
-xpos = []
-ypos = []
-zpos = []
-line = fp.readline()
-while line:
-    line = line.split()
-    xpos.append(float(line[0]))
-    ypos.append(float(line[1]))
-    zpos.append(float(line[2]))
-    line= fp.readline()
-
-fp.close()
-
-
-for k in range(32):
-    for j in range(32):
-        for i in range(32):
-            fKD.pos[0][i + j*32 + k*1024] = float(i)/32 + 1./64 + 0.0001*random.random()
-            fKD.pos[1][i + j*32 + k*1024] = float(j)/32 + 1./64 + 0.0001*random.random()
-            fKD.pos[2][i + j*32 + k*1024] = float(k)/32 + 1./64 + 0.0001*random.random()
-
-            
-
-#print fKD.pos[0][0],fKD.pos[1][0],fKD.pos[2][0]
-
-create_tree()
-
-
-find_nn_nearest_neighbors()
-
-#print 'next'
-
-#fKD.qv = numpy.array([0., 0., 0.])
-
-#find_nn_nearest_neighbors()
-
-
-#print (fKD.tags - 1)
-#print fKD.dist
-
-free_tree()


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/lib/RayIntegrators.pyx
--- a/yt/utilities/lib/RayIntegrators.pyx
+++ b/yt/utilities/lib/RayIntegrators.pyx
@@ -1,4 +1,4 @@
-"""
+""" 
 Simple integrators for the radiative transfer equation
 
 Author: Matthew Turk <matthewturk at gmail.com>


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/lib/alt_ray_tracers.pyx
--- /dev/null
+++ b/yt/utilities/lib/alt_ray_tracers.pyx
@@ -0,0 +1,230 @@
+"""Ray tracing algorithms for rays in non-cartesian coordinate systems.
+
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: University of Chicago
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Anthony Scopatz.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+cimport numpy as np
+cimport cython
+cimport libc.math as math
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+def _cyl2cart(np.ndarray[np.float64_t, ndim=2] x):
+    """Converts points in cylindrical coordinates to cartesian points."""
+    # NOTE this should be removed once the coord interface comes online
+    cdef int i, I
+    cdef np.ndarray[np.float64_t, ndim=2] xcart 
+    I = x.shape[0]
+    xcart = np.empty((I, x.shape[1]), dtype='float64')
+    for i in range(I):
+        xcart[i,0] = x[i,0] * math.cos(x[i,2])
+        xcart[i,1] = x[i,0] * math.sin(x[i,2])
+        xcart[i,2] = x[i,1]
+    return xcart
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+def _cart_intersect(np.ndarray[np.float64_t, ndim=1] a,
+                    np.ndarray[np.float64_t, ndim=1] b,
+                    np.ndarray[np.float64_t, ndim=2] c,
+                    np.ndarray[np.float64_t, ndim=2] d):
+    """Finds the times and locations of the lines defined by a->b and 
+    c->d in cartesian space.  a and b must be 1d points.  c and d must 
+    be 2d arrays of equal shape whose second dim is the same length as
+    a and b.
+    """
+    cdef int i, I
+    cdef np.ndarray[np.float64_t, ndim=1] r, s, t
+    cdef np.ndarray[np.float64_t, ndim=2] loc
+    I = c.shape[0]
+    shape = (I, c.shape[1])
+    t = np.empty(I, dtype='float64')
+    loc = np.empty(shape, dtype='float64')
+
+    r = b - a
+    for i in range(I):
+        s = d[i] - c[i]
+        t[i] = (np.cross(c[i] - a, s).sum()) / (np.cross(r, s).sum())
+        loc[i] = a + t[i]*r
+    return t, loc
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+def clyindrical_ray_trace(np.ndarray[np.float64_t, ndim=1] p1, 
+                          np.ndarray[np.float64_t, ndim=1] p2, 
+                          np.ndarray[np.float64_t, ndim=2] left_edges, 
+                          np.ndarray[np.float64_t, ndim=2] right_edges):
+    """Computes straight (cartesian) rays being traced through a 
+    cylindrical geometry.
+
+    Parameters
+    ----------
+    p1 : length 3 float ndarray
+        start point for ray
+    p2 : length 3 float ndarray
+        stop point for ray
+    left_edges : 2d ndarray
+        left edges of grid cells
+    right_edges : 2d ndarray
+        right edges of grid cells
+
+    Returns
+    -------
+    t : 1d float ndarray
+        ray parametric time on range [0,1]
+    s : 1d float ndarray
+        ray parametric distance on range [0,len(ray)]
+    rztheta : 2d float ndarray
+        ray grid cell intersections in cylindrical coordinates
+    inds : 1d int ndarray
+        indexes into the grid cells which the ray crosses in order.
+
+    """
+    cdef int i, I
+    cdef np.float64_t a, b, bsqrd, twoa
+    cdef np.ndarray[np.float64_t, ndim=1] dp, p1cart, p2cart, dpcart, t, s, \
+                                          rleft, rright, zleft, zright, \
+                                          cleft, cright, thetaleft, thetaright, \
+                                          tmleft, tpleft, tmright, tpright, tsect
+    cdef np.ndarray[np.int64_t, ndim=1] inds, tinds, sinds
+    cdef np.ndarray[np.float64_t, ndim=2] xyz, rztheta, ptemp, b1, b2, dsect
+
+    # set up  points
+    dp = p2 - p1
+    ptemp = np.array([p1, p2])
+    ptemp = _cyl2cart(ptemp)
+    p1cart = ptemp[0]
+    p2cart = ptemp[1]
+    dpcart = p2cart - p1cart
+
+    # set up components
+    rleft = left_edges[:,0]
+    rright = right_edges[:,0]
+    zleft = left_edges[:,1]
+    zright = right_edges[:,1]
+
+    a = (dpcart[0]**2) + (dpcart[1]**2)
+    b = (2*dpcart[0]*p1cart[0]) + (2*dpcart[1]*p1cart[1])
+    cleft = ((p1cart[0]**2) + (p1cart[1]**2)) - rleft**2
+    cright = ((p1cart[0]**2) + (p1cart[1]**2)) - rright**2
+    twoa = 2*a
+    bsqrd = b**2
+
+    # Compute positive and negative times and associated masks
+    I = left_edges.shape[0]
+    tmleft = np.empty(I, dtype='float64')
+    tpleft = np.empty(I, dtype='float64')
+    tmright = np.empty(I, dtype='float64')
+    tpright = np.empty(I, dtype='float64')
+    for i in range(I):
+        tmleft[i] = (-b - math.sqrt(bsqrd - 4*a*cleft[i])) / twoa
+        tpleft[i] = (-b + math.sqrt(bsqrd - 4*a*cleft[i])) / twoa  
+        tmright[i] = (-b - math.sqrt(bsqrd - 4*a*cright[i])) / twoa
+        tpright[i] = (-b + math.sqrt(bsqrd - 4*a*cright[i])) / twoa
+
+    tmmright = np.logical_and(~np.isnan(tmright), rright <= p1[0])
+    tpmright = np.logical_and(~np.isnan(tpright), rright <= p2[0])
+
+    tmmleft = np.logical_and(~np.isnan(tmleft), rleft <= p1[0])
+    tpmleft = np.logical_and(~np.isnan(tpleft), rleft <= p2[0])
+
+    # compute first cut of indexes and thetas, which 
+    # have been filtered by those values for which intersection
+    # times are impossible (see above masks). Note that this is
+    # still independent of z.
+    inds = np.unique(np.concatenate([np.argwhere(tmmleft).flat, 
+                                     np.argwhere(tpmleft).flat, 
+                                     np.argwhere(tmmright).flat, 
+                                     np.argwhere(tpmright).flat,]))
+    if 0 == inds.shape[0]:
+        inds = np.arange(I)
+        thetaleft = np.empty(I)
+        thetaleft.fill(p1[2])
+        thetaright = np.empty(I)
+        thetaright.fill(p2[2])
+    else:
+        rleft = rleft[inds]
+        rright = rright[inds]
+
+        zleft = zleft[inds]
+        zright = zright[inds]
+
+        thetaleft = np.arctan2((p1cart[1] + tmleft[inds]*dpcart[1]), 
+                               (p1cart[0] + tmleft[inds]*dpcart[0]))
+        nans = np.isnan(thetaleft)
+        thetaleft[nans] = np.arctan2((p1cart[1] + tpleft[inds[nans]]*dpcart[1]), 
+                                     (p1cart[0] + tpleft[inds[nans]]*dpcart[0]))
+        thetaright = np.arctan2((p1cart[1] + tmright[inds]*dpcart[1]), 
+                                (p1cart[0] + tmright[inds]*dpcart[0]))
+        nans = np.isnan(thetaright)
+        thetaright[nans] = np.arctan2((p1cart[1] + tpright[inds[nans]]*dpcart[1]), 
+                                      (p1cart[0] + tpright[inds[nans]]*dpcart[0]))
+
+    # Set up the cell boundary arrays    
+    b1 = np.concatenate([[rleft,  zright, thetaleft],
+                         [rleft,  zleft,  thetaleft],
+                         [rleft,  zleft,  thetaleft],
+                         [rright, zleft,  thetaright],
+                         [rleft,  zleft,  thetaleft],
+                         [rright, zright, thetaleft],
+                         [rleft,  zright, thetaleft],
+                         [rright, zleft,  thetaleft],
+                         ], axis=1).T
+
+    b2 = np.concatenate([[rright, zright, thetaright],
+                         [rright, zleft,  thetaright], 
+                         [rleft,  zright, thetaleft], 
+                         [rright, zright, thetaright], 
+                         [rleft,  zleft,  thetaright], 
+                         [rright, zright, thetaright], 
+                         [rleft,  zright, thetaright], 
+                         [rright, zleft,  thetaright],
+                         ], axis=1).T
+
+    inds = np.concatenate([inds, inds, inds, inds, inds, inds, inds, inds])
+
+    # find intersections and compute return values
+    tsect, dsect = _cart_intersect(p1cart, p2cart, _cyl2cart(b1), _cyl2cart(b2))
+    tmask = np.logical_and(0.0<=tsect, tsect<=1.0)
+    tsect, tinds = np.unique(tsect[tmask], return_index=True)
+    inds = inds[tmask][tinds]
+    xyz = dsect[tmask][tinds]
+    s = np.sqrt(((xyz - p1cart)**2).sum(axis=1))
+    s, sinds = np.unique(s, return_index=True)
+    inds = inds[sinds]
+    xyz = xyz[sinds]
+    t = s/np.sqrt((dpcart**2).sum())
+    sinds = s.argsort()
+    s = s[sinds]
+    t = t[sinds]
+    inds = inds[sinds]
+    xyz = xyz[sinds]
+    rztheta = np.concatenate([np.sqrt(xyz[:,0]**2 + xyz[:,1]**2)[:,np.newaxis], 
+                              xyz[:,2:3],
+                              np.arctan2(xyz[:,1], xyz[:,0])[:,np.newaxis]], axis=1)
+    return t, s, rztheta, inds
+    #rztheta[:,2] = 0.0 + (rztheta[:,2] - np.pi*3/2)%(2*np.pi)

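A hedged call sketch for the tracer above: a single (r, z, theta) cell and one ray crossing it. The argument layout follows the docstring, and the values are illustrative:

    import numpy as np
    from yt.utilities.lib.alt_ray_tracers import clyindrical_ray_trace

    p1 = np.array([0.1, 0.2, 0.0])            # ray start, (r, z, theta)
    p2 = np.array([0.9, 0.8, 1.0])            # ray stop
    left_edges = np.array([[0.0, 0.0, 0.0]])
    right_edges = np.array([[1.0, 1.0, 2.0 * np.pi]])
    t, s, rztheta, inds = clyindrical_ray_trace(p1, p2, left_edges,
                                                right_edges)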

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/lib/fortran_reader.pyx
--- a/yt/utilities/lib/fortran_reader.pyx
+++ b/yt/utilities/lib/fortran_reader.pyx
@@ -53,8 +53,8 @@
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def read_and_seek(char *filename, int offset1, int offset2,
-                  np.ndarray buffer, int bytes):
+def read_and_seek(char *filename, np.int64_t offset1,
+                  np.int64_t offset2, np.ndarray buffer, int bytes):
     cdef FILE *f = fopen(filename, "rb")
     cdef void *buf = <void *> buffer.data
     cdef char line[1024]


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -352,3 +352,48 @@
             positions[i, j] = p[j]
     return positions
 
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+def obtain_rv_vec(data):
+    # This is just to let the pointers exist and whatnot.  We can't cdef them
+    # inside conditionals.
+    cdef np.ndarray[np.float64_t, ndim=1] vxf
+    cdef np.ndarray[np.float64_t, ndim=1] vyf
+    cdef np.ndarray[np.float64_t, ndim=1] vzf
+    cdef np.ndarray[np.float64_t, ndim=2] rvf
+    cdef np.ndarray[np.float64_t, ndim=3] vxg
+    cdef np.ndarray[np.float64_t, ndim=3] vyg
+    cdef np.ndarray[np.float64_t, ndim=3] vzg
+    cdef np.ndarray[np.float64_t, ndim=4] rvg
+    cdef np.float64_t bv[3]
+    cdef int i, j, k
+    bulk_velocity = data.get_field_parameter("bulk_velocity")
+    if bulk_velocity is None:
+        bulk_velocity = np.zeros(3)
+    bv[0] = bulk_velocity[0]; bv[1] = bulk_velocity[1]; bv[2] = bulk_velocity[2]
+    if len(data['x-velocity'].shape) == 1:
+        # One dimensional data
+        vxf = data['x-velocity'].astype("float64")
+        vyf = data['y-velocity'].astype("float64")
+        vzf = data['z-velocity'].astype("float64")
+        rvf = np.empty((3, vxf.shape[0]), 'float64')
+        for i in range(vxf.shape[0]):
+            rvf[0, i] = vxf[i] - bv[0]
+            rvf[1, i] = vyf[i] - bv[1]
+            rvf[2, i] = vzf[i] - bv[2]
+        return rvf
+    else:
+        # Three dimensional data
+        vxg = data['x-velocity'].astype("float64")
+        vyg = data['y-velocity'].astype("float64")
+        vzg = data['z-velocity'].astype("float64")
+        rvg = np.empty((3, vxg.shape[0], vxg.shape[1], vxg.shape[2]), 'float64')
+        for i in range(vxg.shape[0]):
+            for j in range(vxg.shape[1]):
+                for k in range(vxg.shape[2]):
+                    rvg[0,i,j,k] = vxg[i,j,k] - bv[0]
+                    rvg[1,i,j,k] = vyg[i,j,k] - bv[1]
+                    rvg[2,i,j,k] = vzg[i,j,k] - bv[2]
+        return rvg


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -590,7 +590,7 @@
         cdef np.float64_t *pointer = <np.float64_t *> star_colors.data
         for i in range(pos_x.shape[0]):
             kdtree_utils.kd_insert3(self.tree,
-                pos_x[i], pos_y[i], pos_z[i], pointer + i*3)
+                pos_x[i], pos_y[i], pos_z[i], <void *> (pointer + i*3))
 
     def __dealloc__(self):
         kdtree_utils.kd_free(self.tree)
@@ -616,7 +616,7 @@
     cdef np.float64_t slopes[6], dp[3], ds[3]
     cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
     cdef np.float64_t dvs[6], cell_left[3], local_dds[3], pos[3]
-    cdef int nstars
+    cdef int nstars, dti, i, j
     cdef np.float64_t *colors = NULL, gexp, gaussian, px, py, pz
     for i in range(3):
         dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
@@ -648,6 +648,7 @@
         dvs[i] = temp
     for dti in range(vri.n_samples): 
         # Now we add the contribution from stars
+        kdtree_utils.kd_res_rewind(ballq)
         for i in range(nstars):
             kdtree_utils.kd_res_item3(ballq, &px, &py, &pz)
             colors = <np.float64_t *> kdtree_utils.kd_res_item_data(ballq)
@@ -655,20 +656,22 @@
             gexp = (px - pos[0])*(px - pos[0]) \
                  + (py - pos[1])*(py - pos[1]) \
                  + (pz - pos[2])*(pz - pos[2])
-            gaussian = vri.star_coeff * expl(-gexp/vri.star_sigma_num)
-            for i in range(3): im.rgba[i] += gaussian*dt*colors[i]
+            gaussian = vri.star_coeff * exp(-gexp/vri.star_sigma_num)
+            for j in range(3): im.rgba[j] += gaussian*dt*colors[j]
         for i in range(3):
             pos[i] += local_dds[i]
         FIT_eval_transfer(dt, dvs, im.rgba, vri.n_fits, vri.fits,
                           vri.field_table_ids, vri.grey_opacity)
         for i in range(vc.n_fields):
             dvs[i] += slopes[i]
+    kdtree_utils.kd_res_free(ballq)
 
 cdef class VolumeRenderSampler(ImageSampler):
     cdef VolumeRenderAccumulator *vra
     cdef public object tf_obj
     cdef public object my_field_tables
     cdef kdtree_utils.kdtree **trees
+    cdef object tree_containers
     def __cinit__(self, 
                   np.ndarray vp_pos,
                   np.ndarray vp_dir,
@@ -709,6 +712,7 @@
             self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
         self.supp_data = <void *> self.vra
         cdef star_kdtree_container skdc
+        self.tree_containers = star_list
         if star_list is None:
             self.trees = NULL
         else:
@@ -719,10 +723,15 @@
                 self.trees[i] = skdc.tree
 
     cdef void setup(self, PartitionedGrid pg):
+        cdef star_kdtree_container star_tree
         if self.trees == NULL:
             self.sampler = volume_render_sampler
         else:
+            star_tree = self.tree_containers[pg.parent_grid_id]
             self.vra.star_list = self.trees[pg.parent_grid_id]
+            self.vra.star_sigma_num = 2.0*star_tree.sigma**2.0
+            self.vra.star_er = 2.326 * star_tree.sigma
+            self.vra.star_coeff = star_tree.coeff
             self.sampler = volume_render_stars_sampler
 
     def __dealloc__(self):
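
With the setup() change above, each grid's star kernel parameters come from its own
kdtree container: star_sigma_num is 2*sigma**2 and star_er is the 2.326-sigma search
radius. A short sketch of the per-sample Gaussian weight (sigma and coeff are
illustrative values, not defaults):

    import numpy as np

    sigma, coeff = 0.05, 1.0
    star_sigma_num = 2.0 * sigma**2.0      # matches vra.star_sigma_num
    star_er = 2.326 * sigma                # matches vra.star_er

    d = np.linspace(0.0, star_er, 5)       # sample-to-star distances
    weight = coeff * np.exp(-d**2 / star_sigma_num)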


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -292,49 +292,6 @@
         return rv
     raise KeyError
 
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-def obtain_rvec(data):
-    # This is just to let the pointers exist and whatnot.  We can't cdef them
-    # inside conditionals.
-    cdef np.ndarray[np.float64_t, ndim=1] xf
-    cdef np.ndarray[np.float64_t, ndim=1] yf
-    cdef np.ndarray[np.float64_t, ndim=1] zf
-    cdef np.ndarray[np.float64_t, ndim=2] rf
-    cdef np.ndarray[np.float64_t, ndim=3] xg
-    cdef np.ndarray[np.float64_t, ndim=3] yg
-    cdef np.ndarray[np.float64_t, ndim=3] zg
-    cdef np.ndarray[np.float64_t, ndim=4] rg
-    cdef np.float64_t c[3]
-    cdef int i, j, k
-    center = data.get_field_parameter("center")
-    c[0] = center[0]; c[1] = center[1]; c[2] = center[2]
-    if len(data['x'].shape) == 1:
-        # One dimensional data
-        xf = data['x']
-        yf = data['y']
-        zf = data['z']
-        rf = np.empty((3, xf.shape[0]), 'float64')
-        for i in range(xf.shape[0]):
-            rf[0, i] = xf[i] - c[0]
-            rf[1, i] = yf[i] - c[1]
-            rf[2, i] = zf[i] - c[2]
-        return rf
-    else:
-        # Three dimensional data
-        xg = data['x']
-        yg = data['y']
-        zg = data['z']
-        rg = np.empty((3, xg.shape[0], xg.shape[1], xg.shape[2]), 'float64')
-        for i in range(xg.shape[0]):
-            for j in range(xg.shape[1]):
-                for k in range(xg.shape[2]):
-                    rg[0,i,j,k] = xg[i,j,k] - c[0]
-                    rg[1,i,j,k] = yg[i,j,k] - c[1]
-                    rg[2,i,j,k] = zg[i,j,k] - c[2]
-        return rg
-
 @cython.cdivision(True)
 def pixelize_cylinder(np.ndarray[np.float64_t, ndim=1] radius,
                       np.ndarray[np.float64_t, ndim=1] dradius,
@@ -383,3 +340,91 @@
             theta_i += dthetamin
 
     return img
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def obtain_rvec(data):
+    # This is just to let the pointers exist and whatnot.  We can't cdef them
+    # inside conditionals.
+    cdef np.ndarray[np.float64_t, ndim=1] xf
+    cdef np.ndarray[np.float64_t, ndim=1] yf
+    cdef np.ndarray[np.float64_t, ndim=1] zf
+    cdef np.ndarray[np.float64_t, ndim=2] rf
+    cdef np.ndarray[np.float64_t, ndim=3] xg
+    cdef np.ndarray[np.float64_t, ndim=3] yg
+    cdef np.ndarray[np.float64_t, ndim=3] zg
+    cdef np.ndarray[np.float64_t, ndim=4] rg
+    cdef np.float64_t c[3]
+    cdef int i, j, k
+    center = data.get_field_parameter("center")
+    c[0] = center[0]; c[1] = center[1]; c[2] = center[2]
+    if len(data['x'].shape) == 1:
+        # One dimensional data
+        xf = data['x']
+        yf = data['y']
+        zf = data['z']
+        rf = np.empty((3, xf.shape[0]), 'float64')
+        for i in range(xf.shape[0]):
+            rf[0, i] = xf[i] - c[0]
+            rf[1, i] = yf[i] - c[1]
+            rf[2, i] = zf[i] - c[2]
+        return rf
+    else:
+        # Three dimensional data
+        xg = data['x']
+        yg = data['y']
+        zg = data['z']
+        rg = np.empty((3, xg.shape[0], xg.shape[1], xg.shape[2]), 'float64')
+        for i in range(xg.shape[0]):
+            for j in range(xg.shape[1]):
+                for k in range(xg.shape[2]):
+                    rg[0,i,j,k] = xg[i,j,k] - c[0]
+                    rg[1,i,j,k] = yg[i,j,k] - c[1]
+                    rg[2,i,j,k] = zg[i,j,k] - c[2]
+        return rg
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def obtain_rv_vec(data):
+    # This is just to let the pointers exist and whatnot.  We can't cdef them
+    # inside conditionals.
+    cdef np.ndarray[np.float64_t, ndim=1] vxf
+    cdef np.ndarray[np.float64_t, ndim=1] vyf
+    cdef np.ndarray[np.float64_t, ndim=1] vzf
+    cdef np.ndarray[np.float64_t, ndim=2] rvf
+    cdef np.ndarray[np.float64_t, ndim=3] vxg
+    cdef np.ndarray[np.float64_t, ndim=3] vyg
+    cdef np.ndarray[np.float64_t, ndim=3] vzg
+    cdef np.ndarray[np.float64_t, ndim=4] rvg
+    cdef np.float64_t bv[3]
+    cdef int i, j, k
+    bulk_velocity = data.get_field_parameter("bulk_velocity")
+    if bulk_velocity is None:
+        bulk_velocity = np.zeros(3)
+    bv[0] = bulk_velocity[0]; bv[1] = bulk_velocity[1]; bv[2] = bulk_velocity[2]
+    if len(data['x-velocity'].shape) == 1:
+        # One dimensional data
+        vxf = data['x-velocity'].astype("float64")
+        vyf = data['y-velocity'].astype("float64")
+        vzf = data['z-velocity'].astype("float64")
+        rvf = np.empty((3, vxf.shape[0]), 'float64')
+        for i in range(vxf.shape[0]):
+            rvf[0, i] = vxf[i] - bv[0]
+            rvf[1, i] = vyf[i] - bv[1]
+            rvf[2, i] = vzf[i] - bv[2]
+        return rvf
+    else:
+        # Three dimensional data
+        vxg = data['x-velocity'].astype("float64")
+        vyg = data['y-velocity'].astype("float64")
+        vzg = data['z-velocity'].astype("float64")
+        rvg = np.empty((3, vxg.shape[0], vxg.shape[1], vxg.shape[2]), 'float64')
+        for i in range(vxg.shape[0]):
+            for j in range(vxg.shape[1]):
+                for k in range(vxg.shape[2]):
+                    rvg[0,i,j,k] = vxg[i,j,k] - bv[0]
+                    rvg[1,i,j,k] = vyg[i,j,k] - bv[1]
+                    rvg[2,i,j,k] = vzg[i,j,k] - bv[2]
+        return rvg
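
obtain_rv_vec only requires its argument to support field item access plus
get_field_parameter, so any duck-typed object works. A minimal stand-in (purely
illustrative, not a yt class):

    import numpy as np

    class FakeData(dict):
        # field access via dict; one parameter hook for the bulk velocity
        def get_field_parameter(self, name):
            if name == "bulk_velocity":
                return np.array([0.1, 0.0, 0.0])
            return None

    data = FakeData({'x-velocity': np.random.random(16),
                     'y-velocity': np.random.random(16),
                     'z-velocity': np.random.random(16)})
    # vels = obtain_rv_vec(data)   # shape (3, 16), bulk velocity subtracted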


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -135,6 +135,9 @@
     config.add_extension("Interpolators", 
                 ["yt/utilities/lib/Interpolators.pyx"],
                 libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+    config.add_extension("alt_ray_tracers", 
+                ["yt/utilities/lib/alt_ray_tracers.pyx"],
+                libraries=["m"], depends=[])
     config.add_extension("marching_cubes", 
                 ["yt/utilities/lib/marching_cubes.pyx",
                  "yt/utilities/lib/FixedInterpolator.c"],


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/lib/tests/test_alt_ray_tracers.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_alt_ray_tracers.py
@@ -0,0 +1,85 @@
+"""Tests for non-cartesian ray tracers."""
+import nose
+import numpy as np
+
+from nose.tools import assert_equal, assert_not_equal, assert_raises, raises, \
+    assert_almost_equal, assert_true, assert_false, assert_in, assert_less_equal, \
+    assert_greater_equal
+from numpy.testing import assert_array_equal, assert_array_almost_equal
+from yt.testing import amrspace
+
+from yt.utilities.lib.alt_ray_tracers import clyindrical_ray_trace, _cyl2cart
+
+left_grid = right_grid = amr_levels = center_grid = data = None
+
+def setup():
+    # set up some sample cylindrical grid data, radiating out from center
+    global left_grid, right_grid, amr_levels, center_grid, data
+    np.seterr(all='ignore')
+    l1, r1, lvl1 = amrspace([0.0, 1.0, 0.0, -1.0, 0.0, 2*np.pi], levels=(7,7,0))
+    l2, r2, lvl2 = amrspace([0.0, 1.0, 0.0,  1.0, 0.0, 2*np.pi], levels=(7,7,0))
+    left_grid = np.concatenate([l1,l2], axis=0)
+    right_grid = np.concatenate([r1,r2], axis=0)
+    amr_levels = np.concatenate([lvl1,lvl2], axis=0)
+    center_grid = (left_grid + right_grid) / 2.0
+    data = np.cos(np.sqrt(np.sum(center_grid[:,:2]**2, axis=1)))**2  # cos^2
+
+
+point_pairs = np.array([
+    # p1               p2
+    ([0.5, -1.0, 0.0], [1.0, 1.0, 0.75*np.pi]),  # Everything different
+    ([0.5, -1.0, 0.0], [0.5, 1.0, 0.75*np.pi]),  # r same
+    ([0.5, -1.0, 0.0], [0.5, 1.0, np.pi]),       # diagonal through z-axis
+    # straight through z-axis
+    ([0.5, 0.0, 0.0],  [0.5, 0.0, np.pi]),       
+    #([0.5, 0.0, np.pi*3/2 + 0.0], [0.5, 0.0, np.pi*3/2 + np.pi]),
+    #([0.5, 0.0, np.pi/2 + 0.0], [0.5, 0.0, np.pi/2 + np.pi]),
+    #([0.5, 0.0, np.pi + 0.0], [0.5, 0.0, np.pi + np.pi]),
+    # const z, not through z-axis
+    ([0.5, 0.1, 0.0],  [0.5, 0.1, 0.75*np.pi]),
+    #([0.5, 0.1, np.pi + 0.0], [0.5, 0.1, np.pi + 0.75*np.pi]), 
+    #([0.5, 0.1, np.pi*3/2 + 0.0], [0.5, 0.1, np.pi*3/2 + 0.75*np.pi]),
+    #([0.5, 0.1, np.pi/2 + 0.0], [0.5, 0.1, np.pi/2 + 0.75*np.pi]), 
+    #([0.5, 0.1, 2*np.pi + 0.0], [0.5, 0.1, 2*np.pi + 0.75*np.pi]), 
+    #([0.5, 0.1, np.pi/4 + 0.0], [0.5, 0.1, np.pi/4 + 0.75*np.pi]),
+    #([0.5, 0.1, np.pi*3/8 + 0.0], [0.5, 0.1, np.pi*3/8 + 0.75*np.pi]), 
+    ([0.5, -1.0, 0.75*np.pi], [1.0, 1.0, 0.75*np.pi]),  # r,z different - theta same
+    ([0.5, -1.0, 0.75*np.pi], [0.5, 1.0, 0.75*np.pi]),  # z-axis parallel
+    ([0.0, -1.0, 0.0], [0.0, 1.0, 0.0]),                # z-axis itself
+    ])
+
+
+def check_monotonic_inc(arr):
+    assert_true(np.all(0.0 <= (arr[1:] - arr[:-1])))
+
+def check_bounds(arr, blower, bupper):
+    assert_true(np.all(blower <= arr))
+    assert_true(np.all(bupper >= arr))
+
+
+def test_clyindrical_ray_trace():
+    for pair in point_pairs:
+        p1, p2 = pair
+        p1cart, p2cart = _cyl2cart(pair)
+        pathlen = np.sqrt(np.sum((p2cart - p1cart)**2))
+
+        t, s, rztheta, inds = clyindrical_ray_trace(p1, p2, left_grid, right_grid)
+        npoints = len(t)
+
+        yield check_monotonic_inc, t
+        yield assert_less_equal, 0.0, t[0]
+        yield assert_less_equal, t[-1], 1.0
+
+        yield check_monotonic_inc, s
+        yield assert_less_equal, 0.0, s[0]
+        yield assert_less_equal, s[-1], pathlen
+        yield assert_equal, npoints, len(s)
+
+        yield assert_equal, (npoints, 3), rztheta.shape
+        yield check_bounds, rztheta[:,0],  0.0, 1.0
+        yield check_bounds, rztheta[:,1], -1.0, 1.0
+        yield check_bounds, rztheta[:,2],  0.0, 2*np.pi
+        yield check_monotonic_inc, rztheta[:,2]
+
+        yield assert_equal, npoints, len(inds)
+        yield check_bounds, inds, 0, len(left_grid)-1


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/lib/tests/test_geometry_utils.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_geometry_utils.py
@@ -0,0 +1,30 @@
+from yt.testing import *
+from yt.utilities.lib import obtain_rvec, obtain_rv_vec
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_obtain_rvec():
+    pf = fake_random_pf(64, nprocs=8, fields=_fields, 
+           negative = [False, True, True, True])
+    
+    dd = pf.h.sphere((0.5,0.5,0.5), 0.2)
+
+    coords = obtain_rvec(dd)
+
+    r = np.sqrt(np.sum(coords*coords,axis=0))
+
+    assert_array_less(r.max(), 0.2)
+
+    assert_array_less(0.0, r.min())
+
+def test_obtain_rv_vec():
+    pf = fake_random_pf(64, nprocs=8, fields=_fields, 
+           negative = [False, True, True, True])
+
+    dd = pf.h.all_data()
+
+    vels = obtain_rv_vec(dd)
+
+    assert_array_equal(vels[0,:], dd['x-velocity'])
+    assert_array_equal(vels[1,:], dd['y-velocity'])
+    assert_array_equal(vels[2,:], dd['z-velocity'])


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import yt.utilities.lib as lib
@@ -35,25 +35,26 @@
         self.truncate = truncate
         x0, x1 = boundaries
         self.x_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
 
-        x_i = (na.digitize(x_vals, self.x_bins) - 1).astype('int32')
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)):
+        x_i = (np.digitize(x_vals, self.x_bins) - 1).astype('int32')
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.UnilinearlyInterpolate(self.table, x_vals, self.x_bins, x_i, my_vals)
-        return my_vals.reshape(orig_shape)
+        my_vals.shape = orig_shape
+        return my_vals
 
 class BilinearFieldInterpolator:
     def __init__(self, table, boundaries, field_names, truncate=False):
@@ -61,32 +62,33 @@
         self.truncate = truncate
         x0, x1, y0, y1 = boundaries
         self.x_name, self.y_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = na.linspace(y0, y1, table.shape[1]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
         y_vals = data_object[self.y_name].ravel().astype('float64')
 
-        x_i = (na.digitize(x_vals, self.x_bins) - 1).astype('int32')
-        y_i = (na.digitize(y_vals, self.y_bins) - 1).astype('int32')
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
-            or na.any((y_i == -1) | (y_i == len(self.y_bins)-1)):
+        x_i = (np.digitize(x_vals, self.x_bins) - 1).astype('int32')
+        y_i = (np.digitize(y_vals, self.y_bins) - 1).astype('int32')
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
+            or np.any((y_i == -1) | (y_i == len(self.y_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
-                y_i = na.minimum(na.maximum(y_i,0), len(self.y_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
+                y_i = np.minimum(np.maximum(y_i,0), len(self.y_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.BilinearlyInterpolate(self.table,
                                  x_vals, y_vals, self.x_bins, self.y_bins,
                                  x_i, y_i, my_vals)
-        return my_vals.reshape(orig_shape)
+        my_vals.shape = orig_shape
+        return my_vals
 
 class TrilinearFieldInterpolator:
     def __init__(self, table, boundaries, field_names, truncate = False):
@@ -94,9 +96,9 @@
         self.truncate = truncate
         x0, x1, y0, y1, z0, z1 = boundaries
         self.x_name, self.y_name, self.z_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = na.linspace(y0, y1, table.shape[1]).astype('float64')
-        self.z_bins = na.linspace(z0, z1, table.shape[2]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+        self.z_bins = np.linspace(z0, z1, table.shape[2]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
@@ -104,52 +106,29 @@
         y_vals = data_object[self.y_name].ravel().astype('float64')
         z_vals = data_object[self.z_name].ravel().astype('float64')
 
-        x_i = na.digitize(x_vals, self.x_bins) - 1
-        y_i = na.digitize(y_vals, self.y_bins) - 1
-        z_i = na.digitize(z_vals, self.z_bins) - 1
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
-            or na.any((y_i == -1) | (y_i == len(self.y_bins)-1)) \
-            or na.any((z_i == -1) | (z_i == len(self.z_bins)-1)):
+        x_i = np.digitize(x_vals, self.x_bins) - 1
+        y_i = np.digitize(y_vals, self.y_bins) - 1
+        z_i = np.digitize(z_vals, self.z_bins) - 1
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
+            or np.any((y_i == -1) | (y_i == len(self.y_bins)-1)) \
+            or np.any((z_i == -1) | (z_i == len(self.z_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
-                y_i = na.minimum(na.maximum(y_i,0), len(self.y_bins)-2)
-                z_i = na.minimum(na.maximum(z_i,0), len(self.z_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
+                y_i = np.minimum(np.maximum(y_i,0), len(self.y_bins)-2)
+                z_i = np.minimum(np.maximum(z_i,0), len(self.z_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.TrilinearlyInterpolate(self.table,
                                  x_vals, y_vals, z_vals,
                                  self.x_bins, self.y_bins, self.z_bins,
                                  x_i, y_i, z_i, my_vals)
-        return my_vals.reshape(orig_shape)
-
-        # Use notation from Paul Bourke's page on interpolation
-        # http://local.wasp.uwa.edu.au/~pbourke/other/interpolation/
-        x = (x_vals - self.x_bins[x_i]) / (self.x_bins[x_i+1] - self.x_bins[x_i])
-        y = (y_vals - self.y_bins[y_i]) / (self.y_bins[y_i+1] - self.y_bins[y_i])
-        z = (z_vals - self.z_bins[z_i]) / (self.z_bins[z_i+1] - self.z_bins[z_i])
-        xm = (self.x_bins[x_i+1] - x_vals) / (self.x_bins[x_i+1] - self.x_bins[x_i])
-        ym = (self.y_bins[y_i+1] - y_vals) / (self.y_bins[y_i+1] - self.y_bins[y_i])
-        zm = (self.z_bins[z_i+1] - z_vals) / (self.z_bins[z_i+1] - self.z_bins[z_i])
-        if na.any(na.isnan(self.table)):
-            raise ValueError
-        if na.any(na.isnan(x) | na.isnan(y) | na.isnan(z)):
-            raise ValueError
-        if na.any(na.isnan(xm) | na.isnan(ym) | na.isnan(zm)):
-            raise ValueError
-        my_vals  = self.table[x_i  ,y_i  ,z_i  ] * (xm*ym*zm)
-        my_vals += self.table[x_i+1,y_i  ,z_i  ] * (x *ym*zm)
-        my_vals += self.table[x_i  ,y_i+1,z_i  ] * (xm*y *zm)
-        my_vals += self.table[x_i  ,y_i  ,z_i+1] * (xm*ym*z )
-        my_vals += self.table[x_i+1,y_i  ,z_i+1] * (x *ym*z )
-        my_vals += self.table[x_i  ,y_i+1,z_i+1] * (xm*y *z )
-        my_vals += self.table[x_i+1,y_i+1,z_i  ] * (x *y *zm)
-        my_vals += self.table[x_i+1,y_i+1,z_i+1] * (x *y *z )
-        return my_vals.reshape(orig_shape)
+        my_vals.shape = orig_shape
+        return my_vals
 
 def get_centers(pf, filename, center_cols, radius_col, unit='1'):
     """

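The interpolators accept any mapping from field name to array, so a plain dict is
enough to exercise them. A short sketch for the trilinear case (table values and
bounds are illustrative):

    import numpy as np
    from yt.utilities.linear_interpolators import TrilinearFieldInterpolator

    table = np.random.random((8, 8, 8))
    tfi = TrilinearFieldInterpolator(table, (0., 1., 0., 1., 0., 1.),
                                     ("x", "y", "z"), truncate=True)
    data = {"x": np.random.random(100),
            "y": np.random.random(100),
            "z": np.random.random(100)}
    vals = tfi(data)   # same shape as data["x"]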

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import math
 
 def periodic_dist(a, b, period):
@@ -48,20 +48,20 @@
 
     Examples
     --------
-    >>> a = na.array([0.1, 0.1, 0.1])
-    >>> b = na.array([0.9, 0,9, 0.9])
+    >>> a = np.array([0.1, 0.1, 0.1])
+    >>> b = np.array([0.9, 0.9, 0.9])
     >>> period = 1.
     >>> dist = periodic_dist(a, b, 1.)
     >>> dist
     0.3464102
     """
-    a = na.array(a)
-    b = na.array(b)
+    a = np.array(a)
+    b = np.array(b)
     if a.size != b.size: raise RuntimeError("Arrays must be the same shape.")
-    c = na.empty((2, a.size), dtype="float64")
+    c = np.empty((2, a.size), dtype="float64")
     c[0,:] = abs(a - b)
     c[1,:] = period - abs(a - b)
-    d = na.amin(c, axis=0)**2
+    d = np.amin(c, axis=0)**2
     return math.sqrt(d.sum())
 
 def rotate_vector_3D(a, dim, angle):
@@ -87,8 +87,8 @@
     
     Examples
     --------
-    >>> a = na.array([[1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1], [3, 4, 5]])
-    >>> b = rotate_vector_3D(a, 2, na.pi/2)
+    >>> a = np.array([[1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1], [3, 4, 5]])
+    >>> b = rotate_vector_3D(a, 2, np.pi/2)
     >>> print b
     [[  1.00000000e+00  -1.00000000e+00   0.00000000e+00]
     [  6.12323400e-17  -1.00000000e+00   1.00000000e+00]
@@ -100,27 +100,27 @@
     mod = False
     if len(a.shape) == 1:
         mod = True
-        a = na.array([a])
+        a = np.array([a])
     if a.shape[1] !=3:
         raise SyntaxError("The second dimension of the array a must be == 3!")
     if dim == 0:
-        R = na.array([[1, 0,0],
-            [0, na.cos(angle), na.sin(angle)],
-            [0, -na.sin(angle), na.cos(angle)]])
+        R = np.array([[1, 0,0],
+            [0, np.cos(angle), np.sin(angle)],
+            [0, -np.sin(angle), np.cos(angle)]])
     elif dim == 1:
-        R = na.array([[na.cos(angle), 0, -na.sin(angle)],
+        R = np.array([[np.cos(angle), 0, -np.sin(angle)],
             [0, 1, 0],
-            [na.sin(angle), 0, na.cos(angle)]])
+            [np.sin(angle), 0, np.cos(angle)]])
     elif dim == 2:
-        R = na.array([[na.cos(angle), na.sin(angle), 0],
-            [-na.sin(angle), na.cos(angle), 0],
+        R = np.array([[np.cos(angle), np.sin(angle), 0],
+            [-np.sin(angle), np.cos(angle), 0],
             [0, 0, 1]])
     else:
         raise SyntaxError("dim must be 0, 1, or 2!")
     if mod:
-        return na.dot(R, a.T).T[0]
+        return np.dot(R, a.T).T[0]
     else:
-        return na.dot(R, a.T).T
+        return np.dot(R, a.T).T
     
 
 def modify_reference_frame(CoM, L, P, V):
@@ -164,9 +164,9 @@
     
     Examples
     --------
-    >>> CoM = na.array([0.5, 0.5, 0.5])
-    >>> L = na.array([1, 0, 0])
-    >>> P = na.array([[1, 0.5, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5], [0, 0, 0]])
+    >>> CoM = np.array([0.5, 0.5, 0.5])
+    >>> L = np.array([1, 0, 0])
+    >>> P = np.array([[1, 0.5, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5], [0, 0, 0]])
     >>> V = P.copy()
     >>> LL, PP, VV = modify_reference_frame(CoM, L, P, V)
     >>> LL
@@ -183,7 +183,7 @@
            [  0.00000000e+00,   0.00000000e+00,   0.00000000e+00]])
 
     """
-    if (L == na.array([0, 0, 1.])).all():
+    if (L == np.array([0, 0, 1.])).all():
         # Whew! Nothing to do!
         return L, P, V
     # First translate the positions to center of mass reference frame.
@@ -191,7 +191,7 @@
     # Now find the angle between modified L and the x-axis.
     LL = L.copy()
     LL[2] = 0.
-    theta = na.arccos(na.inner(LL, [1.,0,0])/na.inner(LL,LL)**.5)
+    theta = np.arccos(np.inner(LL, [1.,0,0])/np.inner(LL,LL)**.5)
     if L[1] < 0:
         theta = -theta
     # Now rotate all the position, velocity, and L vectors by this much around
@@ -200,7 +200,7 @@
     V = rotate_vector_3D(V, 2, theta)
     L = rotate_vector_3D(L, 2, theta)
     # Now find the angle between L and the z-axis.
-    theta = na.arccos(na.inner(L, [0,0,1])/na.inner(L,L)**.5)
+    theta = np.arccos(np.inner(L, [0,0,1])/np.inner(L,L)**.5)
     # This time we rotate around the y axis.
     P = rotate_vector_3D(P, 1, theta)
     V = rotate_vector_3D(V, 1, theta)
@@ -241,10 +241,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> circV = compute_rotational_velocity(CoM, L, P, V)
     >>> circV
     array([ 1.        ,  0.        ,  0.        ,  1.41421356])
@@ -254,13 +254,13 @@
     L, P, V = modify_reference_frame(CoM, L, P, V)
     # Find the vector in the plane of the galaxy for each position point
     # that is perpendicular to the radial vector.
-    radperp = na.cross([0, 0, 1], P)
+    radperp = np.cross([0, 0, 1], P)
     # Find the component of the velocity along the radperp vector.
     # Unf., I don't think there's a better way to do this.
-    res = na.empty(V.shape[0], dtype='float64')
+    res = np.empty(V.shape[0], dtype='float64')
     for i, rp in enumerate(radperp):
-        temp = na.dot(rp, V[i]) / na.dot(rp, rp) * rp
-        res[i] = na.dot(temp, temp)**0.5
+        temp = np.dot(rp, V[i]) / np.dot(rp, rp) * rp
+        res[i] = np.dot(temp, temp)**0.5
     return res
     
 def compute_parallel_velocity(CoM, L, P, V):
@@ -296,10 +296,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> paraV = compute_parallel_velocity(CoM, L, P, V)
     >>> paraV
     array([10, -1,  1, -1])
@@ -342,10 +342,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> radV = compute_radial_velocity(CoM, L, P, V)
     >>> radV
     array([ 1.        ,  1.41421356 ,  0.        ,  0.])
@@ -357,10 +357,10 @@
     # with the cylindrical radial vector for this point.
     # Unf., I don't think there's a better way to do this.
     P[:,2] = 0
-    res = na.empty(V.shape[0], dtype='float64')
+    res = np.empty(V.shape[0], dtype='float64')
     for i, rad in enumerate(P):
-        temp = na.dot(rad, V[i]) / na.dot(rad, rad) * rad
-        res[i] = na.dot(temp, temp)**0.5
+        temp = np.dot(rad, V[i]) / np.dot(rad, rad) * rad
+        res[i] = np.dot(temp, temp)**0.5
     return res
 
 def compute_cylindrical_radius(CoM, L, P, V):
@@ -396,10 +396,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> cyl_r = compute_cylindrical_radius(CoM, L, P, V)
     >>> cyl_r
     array([ 1.        ,  1.41421356,  0.        ,  1.41421356])
@@ -409,7 +409,7 @@
     # Demote all the positions to the z=0 plane, which makes the distance
     # calculation very easy.
     P[:,2] = 0
-    return na.sqrt((P * P).sum(axis=1))
+    return np.sqrt((P * P).sum(axis=1))
     
 def ortho_find(vec1):
     r"""Find two complementary orthonormal vectors to a given vector.
@@ -489,9 +489,9 @@
     >>> c
     array([-0.16903085,  0.84515425, -0.50709255])
     """
-    vec1 = na.array(vec1, dtype=na.float64)
+    vec1 = np.array(vec1, dtype=np.float64)
     # Normalize
-    norm = na.sqrt(na.vdot(vec1, vec1))
+    norm = np.sqrt(np.vdot(vec1, vec1))
     if norm == 0:
         raise ValueError("Zero vector used as input.")
     vec1 /= norm
@@ -513,9 +513,9 @@
         z2 = 0.0
         x2 = -(y1 / x1)
         norm2 = (1.0 + z2 ** 2.0) ** (0.5)
-    vec2 = na.array([x2,y2,z2])
+    vec2 = np.array([x2,y2,z2])
     vec2 /= norm2
-    vec3 = na.cross(vec1, vec2)
+    vec3 = np.cross(vec1, vec2)
     return vec1, vec2, vec3
 
 def quartiles(a, axis=None, out=None, overwrite_input=False):
@@ -570,7 +570,7 @@
 
     Examples
     --------
-    >>> a = na.arange(100).reshape(10,10)
+    >>> a = np.arange(100).reshape(10,10)
     >>> a
     array([[ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9],
            [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
@@ -601,7 +601,7 @@
             a.sort(axis=axis)
             sorted = a
     else:
-        sorted = na.sort(a, axis=axis)
+        sorted = np.sort(a, axis=axis)
     if axis is None:
         axis = 0
     indexer = [slice(None)] * sorted.ndim
@@ -619,8 +619,8 @@
             indexer[axis] = slice(index, index+1)
         # Use mean in odd and even case to coerce data type
         # and check, use out array.
-        result.append(na.mean(sorted[indexer], axis=axis, out=out))
-    return na.array(result)
+        result.append(np.mean(sorted[indexer], axis=axis, out=out))
+    return np.array(result)
 
 def get_rotation_matrix(theta, rot_vector):
     """
@@ -656,21 +656,174 @@
     array([[ 0.70710678,  0.        ,  0.70710678],
            [ 0.        ,  1.        ,  0.        ],
            [-0.70710678,  0.        ,  0.70710678]])
-    >>> na.dot(rot,a)
+    >>> np.dot(rot,a)
     array([ 0.,  1.,  0.])
     # since a is an eigenvector by construction
-    >>> na.dot(rot,[1,0,0])
+    >>> np.dot(rot,[1,0,0])
     array([ 0.70710678,  0.        , -0.70710678])
     """
 
     ux = rot_vector[0]
     uy = rot_vector[1]
     uz = rot_vector[2]
-    cost = na.cos(theta)
-    sint = na.sin(theta)
+    cost = np.cos(theta)
+    sint = np.sin(theta)
     
-    R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
+    R = np.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
                   [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
                   [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
     
     return R
+
+def get_ortho_basis(normal):
+    xprime = np.cross([0.0,1.0,0.0],normal)
+    if np.dot(xprime, xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
+    yprime = np.cross(normal,xprime)
+    zprime = normal
+    return (xprime, yprime, zprime)
+
+def get_sph_r(coords):
+    # The spherical coordinates radius is simply the magnitude of the
+    # coordinate vector.
+
+    return np.sqrt(np.sum(coords**2, axis=-1))
+
+
+def get_sph_theta(coords, normal):
+    # The angle (theta) with respect to the normal (J), is the arccos
+    # of the dot product of the normal with the normalized coordinate
+    # vector.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = np.tile(normal,tile_shape)
+
+    JdotCoords = np.sum(J*coords,axis=-1)
+    
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
+
+def get_sph_phi(coords, normal):
+    # We have freedom with respect to what axis (xprime) to define
+    # the disk angle. Here I've chosen to use the axis that is
+    # perpendicular to the normal and the y-axis. When normal ==
+    # y-hat, then set xprime = z-hat. With this definition, when
+    # normal == z-hat (as is typical), then xprime == x-hat.
+    #
+    # The angle is then given by the arctan of the ratio of the
+    # yprime-component and the xprime-component of the coordinate 
+    # vector.
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    Jx = np.tile(xprime,tile_shape)
+    Jy = np.tile(yprime,tile_shape)
+
+    Px = np.sum(Jx*coords,axis=-1)
+    Py = np.sum(Jy*coords,axis=-1)
+    
+    return np.arctan2(Py,Px)
+
+def get_cyl_r(coords, normal):
+    # The cross product of the normal (J) with a coordinate vector
+    # gives a vector of magnitude equal to the cylindrical radius.
+
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = np.tile(normal, tile_shape)
+    
+    JcrossCoords = np.cross(J, coords)
+    return np.sqrt(np.sum(JcrossCoords**2, axis=-1))
+
+def get_cyl_z(coords, normal):
+    # The dot product of the normal (J) with the coordinate vector 
+    # gives the cylindrical height.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = np.tile(normal, tile_shape)
+
+    return np.sum(J*coords, axis=-1)  
+
+def get_cyl_theta(coords, normal):
+    # This is identical to the spherical phi component
+
+    return get_sph_phi(coords, normal)
+
+
+def get_cyl_r_component(vectors, theta, normal):
+    # The r of a vector is the vector dotted with rhat
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    tile_shape = list(vectors.shape)[:-1] + [1]
+    Jx = np.tile(xprime,tile_shape)
+    Jy = np.tile(yprime,tile_shape)
+
+    rhat = Jx*np.cos(theta) + Jy*np.sin(theta)
+
+    return np.sum(vectors*rhat,axis=-1)
+
+def get_cyl_theta_component(vectors, theta, normal):
+    # The theta component of a vector is the vector dotted with thetahat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    tile_shape = list(vectors.shape)[:-1] + [1]
+    Jx = np.tile(xprime,tile_shape)
+    Jy = np.tile(yprime,tile_shape)
+
+    thetahat = -Jx*np.sin(theta) + Jy*np.cos(theta)
+
+    return np.sum(vectors*thetahat, axis=-1)
+
+def get_cyl_z_component(vectors, normal):
+    # The z component of a vector is the vector dotted with zhat
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    tile_shape = list(vectors.shape)[:-1] + [1]
+    zhat = np.tile(zprime, tile_shape)
+
+    return np.sum(vectors*zhat, axis=-1)
+
+def get_sph_r_component(vectors, theta, phi, normal):
+    # The r component of a vector is the vector dotted with rhat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    tile_shape = list(vectors.shape)[:-1] + [1]
+    Jx = np.tile(xprime,tile_shape)
+    Jy = np.tile(yprime,tile_shape)
+    Jz = np.tile(zprime,tile_shape)
+
+    rhat = Jx*np.sin(theta)*np.cos(phi) + \
+           Jy*np.sin(theta)*np.sin(phi) + \
+           Jz*np.cos(theta)
+
+    return np.sum(vectors*rhat, axis=-1)
+
+def get_sph_phi_component(vectors, phi, normal):
+    # The phi component of a vector is the vector dotted with phihat
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    tile_shape = list(vectors.shape)[:-1] + [1]
+    Jx = np.tile(xprime,tile_shape)
+    Jy = np.tile(yprime,tile_shape)
+
+    phihat = -Jx*np.sin(phi) + Jy*np.cos(phi)
+
+    return np.sum(vectors*phihat, axis=-1)
+
+def get_sph_theta_component(vectors, theta, phi, normal):
+    # The theta component of a vector is the vector dotted with thetahat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    tile_shape = list(vectors.shape)[:-1] + [1]
+    Jx = np.tile(xprime,tile_shape)
+    Jy = np.tile(yprime,tile_shape)
+    Jz = np.tile(zprime,tile_shape)
+    
+    thetahat = Jx*np.cos(theta)*np.cos(phi) + \
+               Jy*np.cos(theta)*np.sin(phi) - \
+               Jz*np.sin(theta)
+
+    return np.sum(vectors*thetahat, axis=-1)
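
A small consistency check for the new helpers, assuming the common normal = z-hat case
(where xprime reduces to x-hat): a vector field equal to the coordinates themselves is
purely radial, so its angular components should vanish.

    import numpy as np

    normal = np.array([0.0, 0.0, 1.0])
    coords = np.random.random((10, 3)) - 0.5

    theta = get_sph_theta(coords, normal)
    phi = get_sph_phi(coords, normal)

    vr = get_sph_r_component(coords, theta, phi, normal)
    assert np.allclose(vr, get_sph_r(coords))
    assert np.allclose(get_sph_theta_component(coords, theta, phi, normal), 0.0)
    assert np.allclose(get_sph_phi_component(coords, phi, normal), 0.0)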


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -23,13 +23,14 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import abc
 import json
 import urllib2
 from tempfile import TemporaryFile
 from yt.config import ytcfg
 from yt.funcs import *
+from yt.utilities.exceptions import *
 
 from .poster.streaminghttp import register_openers
 from .poster.encode import multipart_encode
@@ -93,14 +94,15 @@
     def upload(self):
         api_key = ytcfg.get("yt","hub_api_key")
         url = ytcfg.get("yt","hub_url")
+        if api_key == '': raise YTHubRegisterError
         metadata, (final_name, chunks) = self._generate_post()
         if hasattr(self, "_pf_mrep"):
             self._pf_mrep.upload()
         for i in metadata:
-            if isinstance(metadata[i], na.ndarray):
+            if isinstance(metadata[i], np.ndarray):
                 metadata[i] = metadata[i].tolist()
             elif hasattr(metadata[i], 'dtype'):
-                metadata[i] = na.asscalar(metadata[i])
+                metadata[i] = np.asscalar(metadata[i])
         metadata['obj_type'] = self.type
         if len(chunks) == 0:
             chunk_info = {'chunks': []}
@@ -129,7 +131,7 @@
         for i, (cn, cv) in enumerate(chunks):
             remaining = cv.size * cv.itemsize
             f = TemporaryFile()
-            na.save(f, cv)
+            np.save(f, cv)
             f.seek(0)
             pbar = UploaderBar("%s, % 2i/% 2i" % (self.type, i+1, len(chunks)))
             datagen, headers = multipart_encode({'chunk_data' : f}, cb = pbar)
@@ -216,3 +218,22 @@
         metadata = self._attrs
         chunks = []
         return (metadata, ("chunks", []))
+
+class MinimalNotebook(MinimalRepresentation):
+    type = "notebook"
+    _attr_list = ("title",)
+
+    def __init__(self, filename, title = None):
+        # First we read in the data
+        if not os.path.isfile(filename):
+            raise IOError(filename)
+        self.data = open(filename).read()
+        if title is None:
+            title = json.loads(self.data)['metadata']['name']
+        self.title = title
+        self.data = np.fromstring(self.data, dtype='c')
+
+    def _generate_post(self):
+        metadata = self._attrs
+        chunks = [ ("notebook", self.data) ]
+        return (metadata, ("chunks", chunks))
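
A short usage sketch for the new notebook path ("analysis.ipynb" is a hypothetical
filename, and a hub_api_key must be set in the yt configuration for the upload to go
through):

    from yt.utilities.minimal_representation import MinimalNotebook

    mn = MinimalNotebook("analysis.ipynb")   # title taken from notebook metadata
    mn.upload()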


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.math_utils import get_rotation_matrix
@@ -52,37 +52,39 @@
            
         """
         self.steady_north = steady_north
-        if na.all(north_vector == normal_vector):
+        if np.all(north_vector == normal_vector):
             mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
             north_vector = None
         if north_vector is not None: self.steady_north = True
+        self.north_vector = north_vector
         self._setup_normalized_vectors(normal_vector, north_vector)
 
     def _setup_normalized_vectors(self, normal_vector, north_vector):
         # Now we set up our various vectors
-        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
+        normal_vector /= np.sqrt( np.dot(normal_vector, normal_vector))
         if north_vector is None:
-            vecs = na.identity(3)
-            t = na.cross(normal_vector, vecs).sum(axis=1)
+            vecs = np.identity(3)
+            t = np.cross(normal_vector, vecs).sum(axis=1)
             ax = t.argmax()
-            east_vector = na.cross(vecs[ax,:], normal_vector).ravel()
-            north_vector = na.cross(normal_vector, east_vector).ravel()
+            east_vector = np.cross(vecs[ax,:], normal_vector).ravel()
+            # self.north_vector must remain None otherwise rotations about a fixed axis will break.  
+            # The north_vector calculated here will still be included in self.unit_vectors.
+            north_vector = np.cross(normal_vector, east_vector).ravel()
         else:
             if self.steady_north:
-                north_vector = north_vector - na.dot(north_vector,normal_vector)*normal_vector
-            east_vector = na.cross(north_vector, normal_vector).ravel()
-        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
-        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
+                north_vector = north_vector - np.dot(north_vector,normal_vector)*normal_vector
+            east_vector = np.cross(north_vector, normal_vector).ravel()
+        north_vector /= np.sqrt(np.dot(north_vector, north_vector))
+        east_vector /= np.sqrt(np.dot(east_vector, east_vector))
         self.normal_vector = normal_vector
-        self.north_vector = north_vector
         self.unit_vectors = [east_vector, north_vector, normal_vector]
-        self.inv_mat = na.linalg.pinv(self.unit_vectors)
+        self.inv_mat = np.linalg.pinv(self.unit_vectors)
         
     def switch_orientation(self, normal_vector=None, north_vector=None):
         r"""Change the view direction based on any of the orientation parameters.
 
         This will recalculate all the necessary vectors and vector planes related
-        to a an orientable object.
+        to an orientable object.
 
         Parameters
         ----------

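The steady-north branch of _setup_normalized_vectors, restated in plain NumPy (both
input vectors are illustrative):

    import numpy as np

    normal = np.array([0.0, 0.0, 1.0])
    north = np.array([0.1, 1.0, 0.0])

    normal = normal / np.sqrt(np.dot(normal, normal))
    # project out the component of north along normal, then normalize
    north = north - np.dot(north, normal) * normal
    north = north / np.sqrt(np.dot(north, north))
    east = np.cross(north, normal)
    east = east / np.sqrt(np.dot(east, east))
    unit_vectors = [east, north, normal]   # the basis handed to the camera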

diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/parallel_tools/controller_system.py
--- /dev/null
+++ b/yt/utilities/parallel_tools/controller_system.py
@@ -0,0 +1,69 @@
+"""
+A queueing system based on MPI
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+    
+try:
+    from .parallel_analysis_interface import MPI
+except ImportError:
+    pass
+from contextlib import contextmanager
+from abc import ABCMeta, abstractmethod, abstractproperty
+from .parallel_analysis_interface import ProcessorPool
+
+class WorkSplitter(object):
+    # ABCMeta makes the abstractmethod decorators below effective
+    __metaclass__ = ABCMeta
+
+    def __init__(self, controller, group1, group2):
+        self.group1 = group1
+        self.group2 = group2
+        self.controller = controller
+
+    @classmethod
+    def setup(cls, ng1, ng2):
+        pp, wg = ProcessorPool.from_sizes(
+            [(1, "controller"), (ng1, "group1"), (ng2, "group2")])
+        groupc = pp['controller']
+        group1 = pp['group1']
+        group2 = pp['group2']
+        obj = cls(groupc, group1, group2)
+        obj.run(wg.name)
+
+    def run(self, name):
+        if name == "controller":
+            self.run_controller()
+        elif name == "group1":
+            self.run_group1()
+        elif name == "group2":
+            self.run_group2()
+        else:
+            raise NotImplementedError
+
+    @abstractmethod
+    def run_controller(self):
+        pass
+
+    @abstractmethod
+    def run_group1(self):
+        pass
+
+    @abstractmethod
+    def run_group2(self):
+        pass
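
Concrete splitters fill in the three run methods; a hypothetical sketch of the
intended usage (the method bodies here are placeholders):

    class ExampleSplitter(WorkSplitter):
        def run_controller(self):
            pass   # hand out work items over MPI

        def run_group1(self):
            pass   # e.g. the IO-heavy half of a pipeline

        def run_group2(self):
            pass   # e.g. the compute-heavy half

    # ExampleSplitter.setup(ng1=4, ng2=12)   # 1 controller rank + 4 + 12 workers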


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/parallel_tools/io_runner.py
--- /dev/null
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -0,0 +1,195 @@
+"""
+A simple IO staging mechanism
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import os
+import time
+import numpy as np
+from contextlib import contextmanager
+from yt.funcs import *
+from .parallel_analysis_interface import ProcessorPool, parallel_objects
+from yt.utilities.io_handler import BaseIOHandler
+
+try:
+    from .parallel_analysis_interface import MPI
+except ImportError:
+    pass
+
+YT_TAG_MESSAGE = 317 # Cell 317 knows where to go
+
+class IOCommunicator(BaseIOHandler):
+    def __init__(self, pf, wg, pool):
+        mylog.info("Initializing IOCommunicator")
+        self.pf = pf
+        self.wg = wg # We don't need to use this!
+        self.pool = pool
+        self.comm = pool.comm
+        # We read our grids here
+        self.grids = []
+        storage = {}
+        grids = pf.h.grids.tolist()
+        grids.sort(key=lambda a:a.filename)
+        for sto, g in parallel_objects(grids, storage = storage):
+            sto.result = self.comm.rank
+            sto.result_id = g.id
+            self.grids.append(g)
+        self._id_offset = pf.h.grids[0]._id_offset
+        mylog.info("Reading from disk ...")
+        self.initialize_data()
+        mylog.info("Broadcasting ...")
+        self.comm.comm.bcast(storage, root = wg.ranks[0])
+        mylog.info("Done.")
+        self.hooks = []
+
+    def initialize_data(self):
+        pf = self.pf
+        fields = [f for f in pf.h.field_list
+                  if not pf.field_info[f].particle_type]
+        pfields = [f for f in pf.h.field_list
+                   if pf.field_info[f].particle_type]
+        # Preload is only defined for Enzo ...
+        if pf.h.io._data_style == "enzo_packed_3d":
+            self.queue = pf.h.io.queue
+            pf.h.io.preload(self.grids, fields)
+            for g in self.grids:
+                for f in fields:
+                    if f not in self.queue[g.id]:
+                        d = np.zeros(g.ActiveDimensions, dtype='float64')
+                        self.queue[g.id][f] = d
+                for f in pfields:
+                    self.queue[g.id][f] = self._read(g, f)
+        else:
+            self.queue = {}
+            for g in self.grids:
+                self.queue[g.id] = {}
+                for f in fields + pfields:
+                    self.queue[g.id][f] = self._read(g, f)
+
+    def _read(self, g, f):
+        fi = self.pf.field_info[f]
+        if fi.particle_type and g.NumberOfParticles == 0:
+            # because this gets upcast to float
+            return np.array([],dtype='float64')
+        try:
+            temp = self.pf.h.io._read_data_set(g, f)
+        except Exception: # ideally self.pf.hierarchy.io._read_exception
+            if fi.not_in_all:
+                temp = np.zeros(g.ActiveDimensions, dtype='float64')
+            else:
+                raise
+        return temp
+
+    def wait(self):
+        status = MPI.Status()
+        while 1:
+            if self.comm.comm.Iprobe(MPI.ANY_SOURCE,
+                                YT_TAG_MESSAGE,
+                                status = status):
+                msg = self.comm.comm.recv(
+                        source = status.source, tag = YT_TAG_MESSAGE)
+                if msg['op'] == "end":
+                    mylog.debug("Shutting down IO.")
+                    break
+                self._send_data(msg, status.source)
+                status = MPI.Status()
+            else:
+                time.sleep(1e-2)
+
+    def _send_data(self, msg, dest):
+        grid_id = msg['grid_id']
+        field = msg['field']
+        ts = self.queue[grid_id][field].astype("float64")
+        mylog.debug("Opening send to %s (%s)", dest, ts.shape)
+        self.hooks.append(self.comm.comm.Isend([ts, MPI.DOUBLE], dest = dest))
+
+class IOHandlerRemote(BaseIOHandler):
+    _data_style = "remote"
+
+    def __init__(self, pf, wg, pool):
+        self.pf = pf
+        self.wg = wg # probably won't need
+        self.pool = pool
+        self.comm = pool.comm
+        self.proc_map = self.comm.comm.bcast(None,
+                root = pool['io'].ranks[0])
+        super(IOHandlerRemote, self).__init__()
+
+    def _read_data_set(self, grid, field):
+        dest = self.proc_map[grid.id]
+        msg = dict(grid_id = grid.id, field = field, op="read")
+        mylog.debug("Requesting %s for %s from %s", field, grid, dest)
+        if self.pf.field_info[field].particle_type:
+            data = np.empty(grid.NumberOfParticles, 'float64')
+        else:
+            data = np.empty(grid.ActiveDimensions, 'float64')
+        hook = self.comm.comm.Irecv([data, MPI.DOUBLE], source = dest)
+        self.comm.comm.send(msg, dest = dest, tag = YT_TAG_MESSAGE)
+        mylog.debug("Waiting for data.")
+        MPI.Request.Wait(hook)
+        return data
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        #sl = tuple(reversed(sl))
+        return self._read_data_set(grid,field)[sl]
+
+    def terminate(self):
+        msg = dict(op='end')
+        if self.wg.comm.rank == 0:
+            for rank in self.pool['io'].ranks:
+                mylog.debug("Sending termination message to %s", rank)
+                self.comm.comm.send(msg, dest=rank, tag=YT_TAG_MESSAGE)
+
+ at contextmanager
+def remote_io(pf, wg, pool):
+    original_io = pf.h.io
+    pf.h.io = IOHandlerRemote(pf, wg, pool)
+    yield
+    pf.h.io.terminate()
+    pf.h.io = original_io
+
+def io_nodes(fn, n_io, n_work, func, *args, **kwargs):
+    from yt.mods import load  # deferred import to avoid a circular dependency
+    pool, wg = ProcessorPool.from_sizes([(n_io, "io"), (n_work, "work")])
+    rv = None
+    if wg.name == "work":
+        pf = load(fn)
+        with remote_io(pf, wg, pool):
+            rv = func(pf, *args, **kwargs)
+    elif wg.name == "io":
+        pf = load(fn)
+        io = IOCommunicator(pf, wg, pool)
+        io.wait()
+    # We should broadcast the result
+    rv = pool.comm.mpi_bcast(rv, root=pool['work'].ranks[0])
+    pool.free_all()
+    mylog.debug("Return value: %s", rv)
+    return rv
+
+# Here is an example of how to use this functionality.
+if __name__ == "__main__":
+    def gq(pf):
+        dd = pf.h.all_data()
+        return dd.quantities["TotalQuantity"]("CellMassMsun")
+    q = io_nodes("DD0087/DD0087", 8, 24, gq)
+    mylog.info(q)
+
+


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -27,7 +27,7 @@
 import cStringIO
 import itertools
 import logging
-import numpy as na
+import numpy as np
 import sys
 
 from yt.funcs import *
@@ -131,13 +131,13 @@
         # Note that we're doing this in advance, and with a simple means
         # of choosing them; more advanced methods will be explored later.
         if self._use_all:
-            self.my_obj_ids = na.arange(len(self._objs))
+            self.my_obj_ids = np.arange(len(self._objs))
         else:
             if not round_robin:
-                self.my_obj_ids = na.array_split(
-                                na.arange(len(self._objs)), self._skip)[self._offset]
+                self.my_obj_ids = np.array_split(
+                                np.arange(len(self._objs)), self._skip)[self._offset]
             else:
-                self.my_obj_ids = na.arange(len(self._objs))[self._offset::self._skip]
+                self.my_obj_ids = np.arange(len(self._objs))[self._offset::self._skip]
         
     def __iter__(self):
         for gid in self.my_obj_ids:
@@ -271,7 +271,7 @@
         self.size = size
         self.ranks = ranks
         self.comm = comm
-	self.name = name
+        self.name = name
 
 class ProcessorPool(object):
     comm = None
@@ -294,11 +294,9 @@
             raise RuntimeError
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
-
-	# Default name to the workgroup number.
+        # Default name to the workgroup number.
         if name is None: 
-	    name = string(len(workgroups))
-	    
+            name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
@@ -423,19 +421,18 @@
             njobs, my_size)
         raise RuntimeError
     my_rank = my_communicator.rank
-    all_new_comms = na.array_split(na.arange(my_size), njobs)
+    all_new_comms = np.array_split(np.arange(my_size), njobs)
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i
             break
     if parallel_capable:
         communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
-
     to_share = {}
     # If our objects object is slice-aware, like time series data objects are,
     # this will prevent intermediate objects from being created.
-    oiter = itertools.islice(enumerate(objects), my_new_id, None, njobs)
-    for result_id, obj in oiter:
+    for obj_id, obj in enumerate(objects):
+        result_id = obj_id * njobs + my_new_id
         if storage is not None:
             rstore = ResultsStorage()
             rstore.result_id = result_id
@@ -525,14 +522,14 @@
         #   cat
         #   join
         # data is selected to be of types:
-        #   na.ndarray
+        #   np.ndarray
         #   dict
         #   data field dict
         if datatype is not None:
             pass
         elif isinstance(data, types.DictType):
             datatype == "dict"
-        elif isinstance(data, na.ndarray):
+        elif isinstance(data, np.ndarray):
             datatype == "array"
         elif isinstance(data, types.ListType):
             datatype == "list"
@@ -549,14 +546,14 @@
             field_keys = data.keys()
             field_keys.sort()
             size = data[field_keys[0]].shape[-1]
-            sizes = na.zeros(self.comm.size, dtype='int64')
-            outsize = na.array(size, dtype='int64')
+            sizes = np.zeros(self.comm.size, dtype='int64')
+            outsize = np.array(size, dtype='int64')
             self.comm.Allgather([outsize, 1, MPI.LONG],
                                      [sizes, 1, MPI.LONG] )
             # This nested concatenate is to get the shapes to work out correctly;
             # if we just add [0] to sizes, it will broadcast a summation, not a
             # concatenation.
-            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            offsets = np.add.accumulate(np.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             for key in field_keys:
                 dd = data[key]
@@ -581,16 +578,16 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = na.zeros(0, dtype=dtype) # This only works for
+                    data = np.zeros(0, dtype=dtype) # This only works for
             size = data.shape[-1]
-            sizes = na.zeros(self.comm.size, dtype='int64')
-            outsize = na.array(size, dtype='int64')
+            sizes = np.zeros(self.comm.size, dtype='int64')
+            outsize = np.array(size, dtype='int64')
             self.comm.Allgather([outsize, 1, MPI.LONG],
                                      [sizes, 1, MPI.LONG] )
             # This nested concatenate is to get the shapes to work out correctly;
             # if we just add [0] to sizes, it will broadcast a summation, not a
             # concatenation.
-            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            offsets = np.add.accumulate(np.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             data = self.alltoallv_array(data, arr_size, offsets, sizes)
             return data
@@ -608,7 +605,7 @@
     def mpi_bcast(self, data, root = 0):
         # The second check below makes sure that we know how to communicate
         # this type of array. Otherwise, we'll pickle it.
-        if isinstance(data, na.ndarray) and \
+        if isinstance(data, np.ndarray) and \
                 get_mpi_type(data.dtype) is not None:
             if self.comm.rank == root:
                 info = (data.shape, data.dtype)
@@ -616,7 +613,7 @@
                 info = ()
             info = self.comm.bcast(info, root=root)
             if self.comm.rank != root:
-                data = na.empty(info[0], dtype=info[1])
+                data = np.empty(info[0], dtype=info[1])
             mpi_type = get_mpi_type(info[1])
             self.comm.Bcast([data, mpi_type], root = root)
             return data
@@ -636,7 +633,7 @@
     @parallel_passthrough
     def mpi_allreduce(self, data, dtype=None, op='sum'):
         op = op_names[op]
-        if isinstance(data, na.ndarray) and data.dtype != na.bool:
+        if isinstance(data, np.ndarray) and data.dtype != np.bool:
             if dtype is None:
                 dtype = data.dtype
             if dtype != data.dtype:
@@ -743,7 +740,7 @@
         return (obj._owner == self.comm.rank)
 
     def send_quadtree(self, target, buf, tgd, args):
-        sizebuf = na.zeros(1, 'int64')
+        sizebuf = np.zeros(1, 'int64')
         sizebuf[0] = buf[0].size
         self.comm.Send([sizebuf, MPI.LONG], dest=target)
         self.comm.Send([buf[0], MPI.INT], dest=target)
@@ -751,11 +748,11 @@
         self.comm.Send([buf[2], MPI.DOUBLE], dest=target)
         
     def recv_quadtree(self, target, tgd, args):
-        sizebuf = na.zeros(1, 'int64')
+        sizebuf = np.zeros(1, 'int64')
         self.comm.Recv(sizebuf, source=target)
-        buf = [na.empty((sizebuf[0],), 'int32'),
-               na.empty((sizebuf[0], args[2]),'float64'),
-               na.empty((sizebuf[0],),'float64')]
+        buf = [np.empty((sizebuf[0],), 'int32'),
+               np.empty((sizebuf[0], args[2]),'float64'),
+               np.empty((sizebuf[0],),'float64')]
         self.comm.Recv([buf[0], MPI.INT], source=target)
         self.comm.Recv([buf[1], MPI.DOUBLE], source=target)
         self.comm.Recv([buf[2], MPI.DOUBLE], source=target)
@@ -775,8 +772,8 @@
         sys.exit()
 
         args = qt.get_args() # Will always be the same
-        tgd = na.array([args[0], args[1]], dtype='int64')
-        sizebuf = na.zeros(1, 'int64')
+        tgd = np.array([args[0], args[1]], dtype='int64')
+        sizebuf = np.zeros(1, 'int64')
 
         while mask < size:
             if (mask & rank) != 0:
@@ -802,9 +799,9 @@
             sizebuf[0] = buf[0].size
         self.comm.Bcast([sizebuf, MPI.LONG], root=0)
         if rank != 0:
-            buf = [na.empty((sizebuf[0],), 'int32'),
-                   na.empty((sizebuf[0], args[2]),'float64'),
-                   na.empty((sizebuf[0],),'float64')]
+            buf = [np.empty((sizebuf[0],), 'int32'),
+                   np.empty((sizebuf[0], args[2]),'float64'),
+                   np.empty((sizebuf[0],),'float64')]
         self.comm.Bcast([buf[0], MPI.INT], root=0)
         self.comm.Bcast([buf[1], MPI.DOUBLE], root=0)
         self.comm.Bcast([buf[2], MPI.DOUBLE], root=0)
@@ -816,7 +813,7 @@
 
 
     def send_array(self, arr, dest, tag = 0):
-        if not isinstance(arr, na.ndarray):
+        if not isinstance(arr, np.ndarray):
             self.comm.send((None,None), dest=dest, tag=tag)
             self.comm.send(arr, dest=dest, tag=tag)
             return
@@ -830,7 +827,7 @@
         dt, ne = self.comm.recv(source=source, tag=tag)
         if dt is None and ne is None:
             return self.comm.recv(source=source, tag=tag)
-        arr = na.empty(ne, dtype=dt)
+        arr = np.empty(ne, dtype=dt)
         tmp = arr.view(self.__tocast)
         self.comm.Recv([tmp, MPI.CHAR], source=source, tag=tag)
         return arr
@@ -841,11 +838,11 @@
             for i in range(send.shape[0]):
                 recv.append(self.alltoallv_array(send[i,:].copy(), 
                                                  total_size, offsets, sizes))
-            recv = na.array(recv)
+            recv = np.array(recv)
             return recv
         offset = offsets[self.comm.rank]
         tmp_send = send.view(self.__tocast)
-        recv = na.empty(total_size, dtype=send.dtype)
+        recv = np.empty(total_size, dtype=send.dtype)
         recv[offset:offset+send.size] = send[:]
         dtr = send.dtype.itemsize / tmp_send.dtype.itemsize # > 1
         roff = [off * dtr for off in offsets]
@@ -867,7 +864,7 @@
 
 communication_system = CommunicationSystem()
 if parallel_capable:
-    ranks = na.arange(MPI.COMM_WORLD.size)
+    ranks = np.arange(MPI.COMM_WORLD.size)
     communication_system.push_with_ids(ranks)
 
 class ParallelAnalysisInterface(object):
@@ -926,13 +923,13 @@
         xax, yax = x_dict[axis], y_dict[axis]
         cc = MPI.Compute_dims(self.comm.size, 2)
         mi = self.comm.rank
-        cx, cy = na.unravel_index(mi, cc)
-        x = na.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
+        cx, cy = np.unravel_index(mi, cc)
+        x = np.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
 
         DLE, DRE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
-        LE = na.ones(3, dtype='float64') * DLE
-        RE = na.ones(3, dtype='float64') * DRE
+        LE = np.ones(3, dtype='float64') * DLE
+        RE = np.ones(3, dtype='float64') * DRE
         LE[xax] = x[0] * (DRE[xax]-DLE[xax]) + DLE[xax]
         RE[xax] = x[1] * (DRE[xax]-DLE[xax]) + DLE[xax]
         LE[yax] = y[0] * (DRE[yax]-DLE[yax]) + DLE[yax]
@@ -943,7 +940,7 @@
         return True, reg
 
     def partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
-        LE, RE = na.array(ds.left_edge), na.array(ds.right_edge)
+        LE, RE = np.array(ds.left_edge), np.array(ds.right_edge)
         # We need to establish if we're looking at a subvolume, in which case
         # we *do* want to pad things.
         if (LE == self.pf.domain_left_edge).all() and \
@@ -973,13 +970,13 @@
 
         cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
         mi = self.comm.rank % (self.comm.size / rank_ratio)
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
+        cx, cy, cz = np.unravel_index(mi, cc)
+        x = np.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
+        z = np.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
 
-        LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        RE = na.array([x[1], y[1], z[1]], dtype='float64')
+        LE = np.array([x[0], y[0], z[0]], dtype='float64')
+        RE = np.array([x[1], y[1], z[1]], dtype='float64')
 
         if padding > 0:
             return True, \
@@ -1000,13 +997,13 @@
         
         cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
         mi = self.comm.rank % (self.comm.size / rank_ratio)
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
+        cx, cy, cz = np.unravel_index(mi, cc)
+        x = np.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
+        z = np.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
 
-        LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        RE = na.array([x[1], y[1], z[1]], dtype='float64')
+        LE = np.array([x[0], y[0], z[0]], dtype='float64')
+        RE = np.array([x[1], y[1], z[1]], dtype='float64')
 
         if padding > 0:
             return True, \
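
The rank-to-job assignment in parallel_objects above relies on
np.array_split producing nearly equal chunks, with each rank scanning for
the chunk that contains it.  A quick sketch of that bookkeeping outside of
MPI:

    import numpy as np

    my_size, njobs = 8, 3
    all_new_comms = np.array_split(np.arange(my_size), njobs)
    # -> [array([0, 1, 2]), array([3, 4, 5]), array([6, 7])]
    for my_rank in range(my_size):
        my_new_id = [i for i, s in enumerate(all_new_comms)
                     if my_rank in s][0]
        print("rank %d -> job %d" % (my_rank, my_new_id))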


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/parallel_tools/task_queue.py
--- a/yt/utilities/parallel_tools/task_queue.py
+++ b/yt/utilities/parallel_tools/task_queue.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import time, threading, random
 
 from yt.funcs import *
@@ -142,8 +142,8 @@
                     njobs, (my_size - 1))
         raise RuntimeError
     my_rank = comm.rank
-    all_new_comms = na.array_split(na.arange(1, my_size), njobs)
-    all_new_comms.insert(0, na.array([0]))
+    all_new_comms = np.array_split(np.arange(1, my_size), njobs)
+    all_new_comms.insert(0, np.array([0]))
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i
@@ -170,8 +170,8 @@
                     njobs, (my_size - 1))
         raise RuntimeError
     my_rank = comm.rank
-    all_new_comms = na.array_split(na.arange(1, my_size), njobs)
-    all_new_comms.insert(0, na.array([0]))
+    all_new_comms = np.array_split(np.arange(1, my_size), njobs)
+    all_new_comms.insert(0, np.array([0]))
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i
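
The task queue variant differs from parallel_objects only in reserving rank
0 as the queue server: the remaining ranks are split as before and a
one-rank group is prepended.  A sketch of the resulting layout:

    import numpy as np

    my_size, njobs = 7, 3
    all_new_comms = np.array_split(np.arange(1, my_size), njobs)
    all_new_comms.insert(0, np.array([0]))
    # -> [array([0]), array([1, 2]), array([3, 4]), array([5, 6])]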


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/tests/test_coordinate_conversions.py
--- /dev/null
+++ b/yt/utilities/tests/test_coordinate_conversions.py
@@ -0,0 +1,128 @@
+from yt.testing import *
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi
+
+# Randomly generated coordinates in the domain [[-1,1],[-1,1],[-1,1]]
+coords = np.array([[-0.41503037, -0.22102472, -0.55774212],
+                   [ 0.73828247, -0.17913899,  0.64076921],
+                   [ 0.08922066, -0.94254844, -0.61774511],
+                   [ 0.10173242, -0.95789145,  0.16294352],
+                   [ 0.73186508, -0.3109153 ,  0.75728738],
+                   [ 0.8757989 , -0.41475119, -0.57039201],
+                   [ 0.58040762,  0.81969082,  0.46759728],
+                   [-0.89983356, -0.9853683 , -0.38355343]])
+
+def test_spherical_coordinate_conversion():
+    normal = [0, 0, 1]
+    real_r =     [ 0.72950559,  0.99384957,  1.13047198,  0.97696269,  
+                   1.09807968,  1.12445067,  1.10788685,  1.38843954]
+    real_theta = [ 2.44113629,  0.87012028,  2.14891444,  1.4032274 ,  
+                   0.80979483,  2.10280198,  1.13507735,  1.85068416]
+    real_phi =   [-2.65224483, -0.23804243, -1.47641858, -1.46498842, 
+                  -0.40172325, -0.4422801 ,  0.95466734, -2.31085392]
+
+    calc_r = get_sph_r(coords)
+    calc_theta = get_sph_theta(coords, normal)
+    calc_phi = get_sph_phi(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_phi, real_phi)
+
+    normal = [1, 0, 0]
+    real_theta = [ 2.17598842,  0.73347681,  1.49179079,  1.46647589,  
+                   0.8412984 ,  0.67793705,  1.0193883 ,  2.27586987]
+    real_phi =   [-0.37729951, -2.86898397, -0.99063518, -1.73928995, 
+                   -2.75201227,-0.62870527,  2.08920872, -1.19959244]
+
+    calc_theta = get_sph_theta(coords, normal)
+    calc_phi = get_sph_phi(coords, normal)
+    
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_phi, real_phi)
+
+def test_cylindrical_coordinate_conversion():
+    normal = [0, 0, 1]
+    real_r =     [ 0.47021498,  0.75970506,  0.94676179,  0.96327853,  
+                   0.79516968,  0.96904193,  1.00437346,  1.3344104 ]    
+    real_theta = [-2.65224483, -0.23804243, -1.47641858, -1.46498842, 
+                  -0.40172325, -0.4422801 ,  0.95466734, -2.31085392]
+    real_z =     [-0.55774212,  0.64076921, -0.61774511,  0.16294352,
+                   0.75728738, -0.57039201,  0.46759728, -0.38355343]
+
+    calc_r = get_cyl_r(coords, normal)
+    calc_theta = get_cyl_theta(coords, normal)
+    calc_z = get_cyl_z(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_z, real_z)
+
+    normal = [1, 0, 0]
+    real_r =     [ 0.59994016,  0.66533898,  1.12694569,  0.97165149,
+                   0.81862843,  0.70524152,  0.94368441,  1.05738542]
+    real_theta = [-0.37729951, -2.86898397, -0.99063518, -1.73928995, 
+                  -2.75201227, -0.62870527,  2.08920872, -1.19959244]
+    real_z =     [-0.41503037,  0.73828247,  0.08922066,  0.10173242,
+                   0.73186508,  0.8757989 ,  0.58040762, -0.89983356]
+
+    calc_r = get_cyl_r(coords, normal)
+    calc_theta = get_cyl_theta(coords, normal)
+    calc_z = get_cyl_z(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_z, real_z)
+
+def test_spherical_coordinate_projections():
+    normal = [0, 0, 1]
+    theta = get_sph_theta(coords, normal)
+    theta_pass = np.tile(theta, (3, 1)).T
+    phi = get_sph_phi(coords, normal)
+    phi_pass = np.tile(phi, (3, 1)).T
+    zero = np.tile(0,coords.shape[0])
+
+    # Purely radial field
+    vecs = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)]).T
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta_pass, phi_pass, normal))
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi_pass, normal))
+
+    # Purely toroidal field
+    vecs = np.array([-np.sin(phi), np.cos(phi), zero]).T
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta_pass, phi_pass, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta_pass, phi_pass, normal))
+
+    # Purely poloidal field
+    vecs = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -np.sin(theta)]).T
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi_pass, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta_pass, phi_pass, normal))
+
+def test_cylindrical_coordinate_projections():
+    normal = [0, 0, 1]
+    theta = get_cyl_theta(coords, normal)
+    theta_pass = np.tile(theta, (3, 1)).T
+    z = get_cyl_z(coords, normal)
+    zero = np.tile(0, coords.shape[0])
+
+    # Purely radial field
+    vecs = np.array([np.cos(theta), np.sin(theta), zero]).T
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta_pass, normal))
+    assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
+
+    # Purely toroidal field
+    vecs = np.array([-np.sin(theta), np.cos(theta), zero]).T
+    assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta_pass, normal))
+
+    # Purely z field
+    vecs = np.array([zero, zero, z]).T
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta_pass, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta_pass, normal))
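
For the default normal = [0, 0, 1], the gold values in these tests reduce
to the textbook spherical conversions; a quick independent check with plain
numpy on the first coordinate:

    import numpy as np

    x, y, z = -0.41503037, -0.22102472, -0.55774212
    r = np.sqrt(x**2 + y**2 + z**2)   # ~  0.72950559  (get_sph_r)
    theta = np.arccos(z / r)          # ~  2.44113629  (get_sph_theta)
    phi = np.arctan2(y, x)            # ~ -2.65224483  (get_sph_phi)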


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/tests/test_decompose.py
--- /dev/null
+++ b/yt/utilities/tests/test_decompose.py
@@ -0,0 +1,96 @@
+"""
+Test suite for cartesian domain decomposition.
+
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import assert_array_equal, assert_almost_equal
+import numpy as np
+import yt.utilities.decompose as dec
+
+
+def setup():
+    pass
+
+
+def test_psize_2d():
+    procs = dec.get_psize(np.array([5, 1, 7]), 6)
+    assert_array_equal(procs, np.array([3, 1, 2]))
+    procs = dec.get_psize(np.array([1, 7, 5]), 6)
+    assert_array_equal(procs, np.array([1, 2, 3]))
+    procs = dec.get_psize(np.array([7, 5, 1]), 6)
+    assert_array_equal(procs, np.array([2, 3, 1]))
+
+
+def test_psize_3d():
+    procs = dec.get_psize(np.array([33, 35, 37]), 12)
+    assert_array_equal(procs, np.array([3, 2, 2]))
+
+
+def test_decomposition_2d():
+    array = np.ones((7, 5, 1))
+    bbox = np.array([[-0.7, 0.0], [1.5, 2.0], [0.0, 0.7]])
+    ledge, redge, data = dec.decompose_array(array, np.array([2, 3, 1]), bbox)
+
+    assert_array_equal(data[1].shape, np.array([3, 2, 1]))
+
+    gold_le = np.array([
+                       [-0.7, 1.5, 0.0], [-0.7, 1.6, 0.0],
+                       [-0.7, 1.8, 0.0], [-0.4, 1.5, 0.0],
+                       [-0.4, 1.6, 0.0], [-0.4, 1.8, 0.0]
+                       ])
+    assert_almost_equal(ledge, gold_le, 8)
+
+    gold_re = np.array(
+        [[-0.4, 1.6, 0.7], [-0.4, 1.8, 0.7],
+         [-0.4, 2.0, 0.7], [0.0, 1.6, 0.7],
+         [0.0, 1.8, 0.7], [0.0, 2.0, 0.7]]
+    )
+    assert_almost_equal(redge, gold_re, 8)
+
+
+def test_decomposition_3d():
+    array = np.ones((33, 35, 37))
+    bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+
+    ledge, redge, data = dec.decompose_array(array, np.array([3, 2, 2]), bbox)
+    assert_array_equal(data[0].shape, np.array([11, 17, 18]))
+
+    gold_le = np.array(
+        [[0.00000, -1.50000, 1.00000], [0.00000, -1.50000, 1.72973],
+         [0.00000, -0.04286, 1.00000], [0.00000, -0.04286, 1.72973],
+         [0.33333, -1.50000, 1.00000], [0.33333, -1.50000, 1.72973],
+         [0.33333, -0.04286, 1.00000], [0.33333, -0.04286, 1.72973],
+         [0.66667, -1.50000, 1.00000], [0.66667, -1.50000, 1.72973],
+         [0.66667, -0.04286, 1.00000], [0.66667, -0.04286, 1.72973]]
+    )
+    assert_almost_equal(ledge, gold_le, 5)
+
+    gold_re = np.array(
+        [[0.33333, -0.04286, 1.72973], [0.33333, -0.04286, 2.50000],
+         [0.33333, 1.50000, 1.72973], [0.33333, 1.50000, 2.50000],
+         [0.66667, -0.04286, 1.72973], [0.66667, -0.04286, 2.50000],
+         [0.66667, 1.50000, 1.72973], [0.66667, 1.50000, 2.50000],
+         [1.00000, -0.04286, 1.72973], [1.00000, -0.04286, 2.50000],
+         [1.00000, 1.50000, 1.72973], [1.00000, 1.50000, 2.50000]]
+    )
+    assert_almost_equal(redge, gold_re, 5)
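
The invariants these tests encode: the factors returned by get_psize
multiply out to the processor count, and a degenerate (size-1) axis is
never decomposed.  Stated as a sketch:

    import numpy as np

    domain, n = np.array([5, 1, 7]), 6
    procs = np.array([3, 1, 2])             # expected decomposition
    assert procs.prod() == n
    assert (procs[domain == 1] == 1).all()  # flat axis keeps one slab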


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/tests/test_flagging_methods.py
--- /dev/null
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -0,0 +1,12 @@
+from yt.testing import *
+from yt.utilities.flagging_methods import flagging_method_registry
+
+def setup():
+    global pf
+    pf = fake_random_pf(64)
+    pf.h
+
+def test_over_density():
+    od_flag = flagging_method_registry["overdensity"](0.75) 
+    criterion = (pf.h.grids[0]["Density"] > 0.75)
+    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/tests/test_interpolators.py
--- /dev/null
+++ b/yt/utilities/tests/test_interpolators.py
@@ -0,0 +1,27 @@
+from yt.testing import *
+import yt.utilities.linear_interpolators as lin
+
+def setup():
+    pass
+
+def test_linear_interpolator_1d():
+    random_data = np.random.random(64)
+    fv = {'x': np.mgrid[0.0:1.0:64j]}
+    ufi = lin.UnilinearFieldInterpolator(random_data, (0.0, 1.0), "x", True)
+    assert_array_equal(ufi(fv), random_data)
+
+def test_linear_interpolator_2d():
+    random_data = np.random.random((64, 64))
+    fv = dict((ax, v) for ax, v in zip("xyz",
+               np.mgrid[0.0:1.0:64j, 0.0:1.0:64j]))
+    bfi = lin.BilinearFieldInterpolator(random_data,
+            (0.0, 1.0, 0.0, 1.0), "xy", True)
+    assert_array_equal(bfi(fv), random_data)
+
+def test_linear_interpolator_3d():
+    random_data = np.random.random((64, 64, 64))
+    fv = dict((ax, v) for ax, v in zip("xyz",
+               np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]))
+    tfi = lin.TrilinearFieldInterpolator(random_data,
+            (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyz", True)
+    assert_array_equal(tfi(fv), random_data)
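
Each case asserts an exact round trip: evaluating the interpolator at the
sample points themselves must return the samples unchanged.  In 1D this is
the same guarantee np.interp makes when the query points coincide with its
abscissas:

    import numpy as np

    random_data = np.random.random(64)
    x = np.mgrid[0.0:1.0:64j]
    np.testing.assert_array_equal(np.interp(x, x, random_data),
                                  random_data)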


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/utilities/tests/test_kdtrees.py
--- /dev/null
+++ b/yt/utilities/tests/test_kdtrees.py
@@ -0,0 +1,93 @@
+"""
+Unit test the kD trees in yt.
+
+Author: Stephen Skory <s at skory.us>
+Affiliation: U of Colorado
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Stephen Skory.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+
+try:
+    from yt.utilities.kdtree.api import *
+except ImportError:
+    mylog.debug("The Fortran kD-Tree did not import correctly.")
+
+from yt.utilities.spatial import cKDTree
+
+def setup():
+    pass
+
+def test_fortran_tree():
+    r"""This test makes sure that the fortran kdtree is finding the correct
+    nearest neighbors.
+    """
+    # Four points.
+    try:
+        fKD.pos = np.empty((3, 4), dtype='float64', order='F')
+    except NameError:
+        return
+    # Make four points by hand that, in particular, will allow us to test
+    # the periodicity of the kdtree.
+    points = np.array([0.01, 0.5, 0.98, 0.99])
+    fKD.pos[0, :] = points
+    fKD.pos[1, :] = points
+    fKD.pos[2, :] = points
+    fKD.qv = np.empty(3, dtype='float64')
+    fKD.dist = np.empty(4, dtype='float64')
+    fKD.tags = np.empty(4, dtype='int64')
+    fKD.nn = 4
+    fKD.sort = True
+    create_tree(0)
+    # Now we check to make sure that we find the correct nearest neighbors,
+    # which get stored in dist and tags.
+    fKD.qv[:] = 0.999
+    find_nn_nearest_neighbors()
+    # Fix fortran counting.
+    fKD.tags -= 1
+    # Clean up before the tests.
+    free_tree(0)
+    # What the answers should be.
+    dist = np.array([2.43e-04, 3.63e-04, 1.083e-03, 7.47003e-01])
+    tags = np.array([3, 0, 2, 1], dtype='int64')
+    assert_array_almost_equal(fKD.dist, dist)
+    assert_array_equal(fKD.tags, tags)
+
+def test_cython_tree():
+    r"""This test makes sure that the cython kdtree is finding the correct
+    nearest neighbors.
+    """
+    # Four points.
+    pos = np.empty((4, 3), dtype='float64')
+    # Make four points by hand that, in particular, will allow us to test
+    # the periodicity of the kdtree.
+    points = np.array([0.01, 0.5, 0.98, 0.99])
+    pos[:, 0] = points
+    pos[:, 1] = points
+    pos[:, 2] = points
+    kdtree = cKDTree(pos, leafsize = 2)
+    qv = np.array([0.999]*3)
+    res = kdtree.query(qv, 4, period=[1.,1.,1])
+    # What the answers should be.
+    dist = np.array([2.43e-04, 3.63e-04, 1.083e-03, 7.47003e-01])
+    tags = np.array([3, 0, 2, 1], dtype='int64')
+    assert_array_almost_equal(res[0], dist)
+    assert_array_equal(res[1], tags)
+
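
The gold values shared by both kD-tree tests are squared distances under
unit periodicity; since the query point 0.999 is offset from each stored
point by the same amount on all three axes, they can be reproduced by hand:

    import numpy as np

    points = np.array([0.01, 0.5, 0.98, 0.99])
    d = np.abs(points - 0.999)
    d = np.minimum(d, 1.0 - d)   # periodic wrap on the unit box
    print(np.sort(3 * d**2))
    # sorted squared distances: 2.43e-04, 3.63e-04, 1.083e-03, 7.47003e-01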


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -66,6 +66,7 @@
 from plot_window import \
     SlicePlot, \
     OffAxisSlicePlot, \
-    ProjectionPlot
+    ProjectionPlot, \
+    OffAxisProjectionPlot
     
 


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -21,7 +21,7 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-import numpy as na
+import numpy as np
 
 import matplotlib
 import matplotlib.colors as cc
@@ -83,14 +83,14 @@
 matplotlib.rc('image', cmap="algae")
 
 # This next colormap was designed by Tune Kamae and converted here by Matt
-_vs = na.linspace(0,1,255)
-_kamae_red = na.minimum(255,
-                113.9*na.sin(7.64*(_vs**1.705)+0.701)-916.1*(_vs+1.755)**1.862 \
+_vs = np.linspace(0,1,255)
+_kamae_red = np.minimum(255,
+                113.9*np.sin(7.64*(_vs**1.705)+0.701)-916.1*(_vs+1.755)**1.862 \
               + 3587.9*_vs+2563.4)/255.0
-_kamae_grn = na.minimum(255,
-                70.0*na.sin(8.7*(_vs**1.26)-2.418)+151.7*_vs**0.5+70.0)/255.0
-_kamae_blu = na.minimum(255,
-                194.5*_vs**2.88+99.72*na.exp(-77.24*(_vs-0.742)**2.0)
+_kamae_grn = np.minimum(255,
+                70.0*np.sin(8.7*(_vs**1.26)-2.418)+151.7*_vs**0.5+70.0)/255.0
+_kamae_blu = np.minimum(255,
+                194.5*_vs**2.88+99.72*np.exp(-77.24*(_vs-0.742)**2.0)
               + 45.40*_vs**0.089+10.0)/255.0
 
 cdict = {'red':zip(_vs,_kamae_red,_kamae_red),
@@ -121,15 +121,15 @@
 _h_cubehelix = 1.0
 
 _cubehelix_data = {
-        'red': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.14861 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) + 1.78277 * na.sin(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
-        'green': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.29227 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) - 0.90649 * na.sin(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
-        'blue': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (1.97294 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'red': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.14861 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) + 1.78277 * np.sin(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'green': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.29227 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) - 0.90649 * np.sin(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'blue': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (1.97294 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
 }
 
 add_cmap("cubehelix", _cubehelix_data)
 
 # Add colormaps in _colormap_data.py that weren't defined here
-_vs = na.linspace(0,1,255)
+_vs = np.linspace(0,1,255)
 for k,v in _cm.color_map_luts.iteritems():
     if k not in yt_colormaps:
         cdict = { 'red': zip(_vs,v[0],v[0]),
@@ -143,5 +143,5 @@
     r = cmap._lut[:-3, 0]
     g = cmap._lut[:-3, 1]
     b = cmap._lut[:-3, 2]
-    a = na.ones(b.shape)
+    a = np.ones(b.shape)
     return [r, g, b, a]
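
The cubehelix entry above hands matplotlib a segmentdata dict whose values
are callables rather than the usual (x, y0, y1) tuples, which matplotlib
supports directly.  A minimal standalone version of that mechanism (toy
ramp functions, not the cubehelix formulas):

    import numpy as np
    from matplotlib.colors import LinearSegmentedColormap

    cdict = {'red':   lambda x: x,
             'green': lambda x: np.sqrt(x),
             'blue':  lambda x: x**2}
    cmap = LinearSegmentedColormap("toy_ramp", cdict, N=256)
    print(cmap(0.5))  # an RGBA tuple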


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import pyx
-import numpy as na
+import numpy as np
 from matplotlib import cm
 from _mpl_imports import FigureCanvasAgg
 
@@ -243,7 +243,7 @@
             if xdata == None:
                 self.canvas.plot(blank_data)
             else:
-                data = pyx.graph.data.points(na.array([xdata, ydata]).T, x=1, y=2)
+                data = pyx.graph.data.points(np.array([xdata, ydata]).T, x=1, y=2)
                 self.canvas.plot(data, [pyx.graph.style.line([pyx.style.linewidth.Thick])])
         else:
             plot = pyx.graph.graphxy \
@@ -253,7 +253,7 @@
             if xdata == None:
                 plot.plot(blank_data)
             else:
-                data = pyx.graph.data.points(na.array([xdata, ydata]).T, x=1, y=2)
+                data = pyx.graph.data.points(np.array([xdata, ydata]).T, x=1, y=2)
                 plot.plot(data, [pyx.graph.style.line([pyx.style.linewidth.Thick])])
             self.canvas.insert(plot)
         self.axes_drawn = True
@@ -495,7 +495,7 @@
         origin = (origin[0] + shift[0], origin[1] + shift[1])
 
         # Convert the colormap into a string
-        x = na.linspace(1,0,256)
+        x = np.linspace(1,0,256)
         cm_string = cm.cmap_d[name](x, bytes=True)[:,0:3].tostring()
 
         cmap_im = pyx.bitmap.image(imsize[0], imsize[1], "RGB", cm_string)


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -28,10 +28,11 @@
     x_dict, \
     y_dict, \
     axis_names
+from .volume_rendering.api import off_axis_projection
 from yt.utilities.lib.misc_utilities import \
     pixelize_cylinder
 import _MPL
-import numpy as na
+import numpy as np
 import weakref
 
 class FixedResolutionBuffer(object):
@@ -355,7 +356,7 @@
         """
         import numdisplay
         numdisplay.open()
-        if take_log: data=na.log10(self[field])
+        if take_log: data=np.log10(self[field])
         else: data=self[field]
         numdisplay.display(data)    
 
@@ -401,7 +402,7 @@
     """
     def __getitem__(self, item):
         if item in self.data: return self.data[item]
-        indices = na.argsort(self.data_source['dx'])[::-1]
+        indices = np.argsort(self.data_source['dx'])[::-1]
         buff = _MPL.CPixelize( self.data_source['x'],   self.data_source['y'],   self.data_source['z'],
                                self.data_source['px'],  self.data_source['py'],
                                self.data_source['pdx'], self.data_source['pdy'], self.data_source['pdz'],
@@ -411,3 +412,28 @@
                                self.bounds).transpose()
         self[item] = buff
         return buff
+
+
+class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
+    def __init__(self, data_source, bounds, buff_size, antialias = True,
+                 periodic = False):
+        self.data = {}
+        FixedResolutionBuffer.__init__(self, data_source, bounds, buff_size, antialias, periodic)
+
+    def __getitem__(self, item):
+        if item in self.data: return self.data[item]
+        mylog.info("Making a fixed resolutuion buffer of (%s) %d by %d" % \
+            (item, self.buff_size[0], self.buff_size[1]))
+        ds = self.data_source
+        width = (self.bounds[1] - self.bounds[0],
+                 self.bounds[3] - self.bounds[2],
+                 self.bounds[5] - self.bounds[4])
+        buff = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
+                                   width, ds.resolution, item,
+                                   weight=ds.weight_field, volume=ds.volume,
+                                   no_ghost=ds.no_ghost, interpolated=ds.interpolated,
+                                   north_vector=ds.north_vector)
+        buff = buff.swapaxes(0,1)
+        self[item] = buff
+        return buff
+
+
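
Both pixelization buffers above share the same caching idiom: the first
__getitem__ for a field computes the image and stores it, and later lookups
return the cached array.  Reduced to its skeleton (a sketch of the pattern,
not the FixedResolutionBuffer API):

    class CachingBuffer(object):
        def __init__(self, make_image):
            self.data = {}
            self.make_image = make_image

        def __getitem__(self, item):
            if item in self.data:
                return self.data[item]
            buff = self.make_image(item)   # the expensive pixelization
            self.data[item] = buff
            return buff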


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/image_panner/vm_panner.py
--- a/yt/visualization/image_panner/vm_panner.py
+++ b/yt/visualization/image_panner/vm_panner.py
@@ -21,7 +21,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import types, os
 from yt.data_objects.construction_data_containers import YTOverlapProjBase
 from yt.data_objects.selection_data_containers import YTSliceBase
@@ -165,7 +165,7 @@
         """
         self.xlim = (low[0], high[0])
         self.ylim = (low[1], high[1])
-        return na.log10(self.buffer)
+        return np.log10(self.buffer)
 
     def set_width(self, width):
         """
@@ -285,7 +285,7 @@
 
     def __call__(self, val):
         self.pylab.clf()
-        self.pylab.imshow(na.log10(val), interpolation='nearest')
+        self.pylab.imshow(np.log10(val), interpolation='nearest')
         self.pylab.savefig("wimage_%03i.png" % self.tile_id)
 
 class TransportAppender(object):
@@ -299,13 +299,13 @@
     def __call__(self, val):
         from yt.utilities.lib import write_png_to_string
         from yt.visualization.image_writer import map_to_colors
-        image = na.log10(val)
-        mi = na.nanmin(image[~na.isinf(image)])
-        ma = na.nanmax(image[~na.isinf(image)])
+        image = np.log10(val)
+        mi = np.nanmin(image[~np.isinf(image)])
+        ma = np.nanmax(image[~np.isinf(image)])
         color_bounds = mi, ma
         image = (image - color_bounds[0])/(color_bounds[1] - color_bounds[0])
         to_plot = map_to_colors(image, "algae")
-        to_plot = na.clip(to_plot, 0, 255)
+        to_plot = np.clip(to_plot, 0, 255)
         s = write_png_to_string(to_plot)
         response_body = "data:image/png;base64," + base64.encodestring(s)
         tf.close()


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -23,7 +23,7 @@
 import types
 import imp
 import os
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import _colormap_data as cmd
@@ -44,7 +44,7 @@
 
         >>> image = scale_image(image, min=0, max=1000)
     """
-    if isinstance(image, na.ndarray) and image.dtype == na.uint8:
+    if isinstance(image, np.ndarray) and image.dtype == np.uint8:
         return image
     if isinstance(image, (types.TupleType, types.ListType)):
         image, mi, ma = image
@@ -52,7 +52,7 @@
         mi = image.min()
     if ma is None:
         ma = image.max()
-    image = (na.clip((image-mi)/(ma-mi) * 255, 0, 255)).astype('uint8')
+    image = (np.clip((image-mi)/(ma-mi) * 255, 0, 255)).astype('uint8')
     return image
 
 def multi_image_composite(fn, red_channel, blue_channel,
@@ -97,26 +97,26 @@
     Examples
     --------
 
-        >>> red_channel = na.log10(frb["Temperature"])
-        >>> blue_channel = na.log10(frb["Density"])
+        >>> red_channel = np.log10(frb["Temperature"])
+        >>> blue_channel = np.log10(frb["Density"])
         >>> multi_image_composite("multi_channel1.png", red_channel, blue_channel)
 
     """
     red_channel = scale_image(red_channel)
     blue_channel = scale_image(blue_channel)
     if green_channel is None:
-        green_channel = na.zeros(red_channel.shape, dtype='uint8')
+        green_channel = np.zeros(red_channel.shape, dtype='uint8')
     else:
         green_channel = scale_image(green_channel)
     if alpha_channel is None:
-        alpha_channel = na.zeros(red_channel.shape, dtype='uint8') + 255
+        alpha_channel = np.zeros(red_channel.shape, dtype='uint8') + 255
     else:
         alpha_channel = scale_image(alpha_channel) 
-    image = na.array([red_channel, green_channel, blue_channel, alpha_channel])
+    image = np.array([red_channel, green_channel, blue_channel, alpha_channel])
     image = image.transpose().copy() # Have to make sure it's contiguous 
     au.write_png(image, fn)
 
-def write_bitmap(bitmap_array, filename, max_val = None, transpose=True):
+def write_bitmap(bitmap_array, filename, max_val = None, transpose=False):
     r"""Write out a bitmapped image directly to a PNG file.
 
     This accepts a three- or four-channel `bitmap_array`.  If the image is not
@@ -141,19 +141,18 @@
         The upper limit to clip values to in the output, if converting to uint8.
         If `bitmap_array` is already uint8, this will be ignored.
     """
-    if bitmap_array.dtype != na.uint8:
+    if bitmap_array.dtype != np.uint8:
         if max_val is None: max_val = bitmap_array.max()
-        bitmap_array = na.clip(bitmap_array / max_val, 0.0, 1.0) * 255
+        bitmap_array = np.clip(bitmap_array / max_val, 0.0, 1.0) * 255
         bitmap_array = bitmap_array.astype("uint8")
     if len(bitmap_array.shape) != 3 or bitmap_array.shape[-1] not in (3,4):
         raise RuntimeError
     if bitmap_array.shape[-1] == 3:
         s1, s2 = bitmap_array.shape[:2]
-        alpha_channel = 255*na.ones((s1,s2,1), dtype='uint8')
-        bitmap_array = na.concatenate([bitmap_array, alpha_channel], axis=-1)
+        alpha_channel = 255*np.ones((s1,s2,1), dtype='uint8')
+        bitmap_array = np.concatenate([bitmap_array, alpha_channel], axis=-1)
     if transpose:
-        for channel in range(bitmap_array.shape[2]):
-            bitmap_array[:,:,channel] = bitmap_array[:,:,channel].T
+        bitmap_array = bitmap_array.swapaxes(0,1)
     if filename is not None:
         au.write_png(bitmap_array.copy(), filename)
     else:
@@ -229,14 +228,14 @@
     """
     image = func(image)
     if color_bounds is None:
-        mi = na.nanmin(image[~na.isinf(image)])
-        ma = na.nanmax(image[~na.isinf(image)])
+        mi = np.nanmin(image[~np.isinf(image)])
+        ma = np.nanmax(image[~np.isinf(image)])
         color_bounds = mi, ma
     else:
         color_bounds = [func(c) for c in color_bounds]
     image = (image - color_bounds[0])/(color_bounds[1] - color_bounds[0])
     to_plot = map_to_colors(image, cmap_name)
-    to_plot = na.clip(to_plot, 0, 255)
+    to_plot = np.clip(to_plot, 0, 255)
     return to_plot
 
 def annotate_image(image, text, xpos, ypos, font_name = "Vera",
@@ -279,7 +278,7 @@
     >>> annotate_image(bitmap, "Hello!", 0, 100)
     >>> write_bitmap(bitmap, "saved.png")
     """
-    if len(image.shape) != 3 or image.dtype != na.uint8:
+    if len(image.shape) != 3 or image.dtype != np.uint8:
         raise RuntimeError("This routine requires a UINT8 bitmapped image.")
     font_path = os.path.join(imp.find_module("matplotlib")[1],
                              "mpl-data/fonts/ttf/",
@@ -295,10 +294,10 @@
         print "Your color map was not found in the extracted colormap file."
         raise KeyError(cmap_name)
     lut = cmd.color_map_luts[cmap_name]
-    x = na.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+    x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
     shape = buff.shape
-    mapped = na.dstack(
-            [(na.interp(buff, x, v)*255) for v in lut ]).astype("uint8")
+    mapped = np.dstack(
+            [(np.interp(buff, x, v)*255) for v in lut ]).astype("uint8")
     return mapped.copy("C")
 
 def strip_colormap_data(fn = "color_map_data.py",
@@ -380,7 +379,7 @@
                          take_log=True)
     """
     import matplotlib
-    from ._mpl_imports import *
+    from ._mpl_imports import FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
 
     # If this is rendered as log, then apply now.
     if take_log:
@@ -421,21 +420,22 @@
     else:
         dpi = None
 
-    if filename[-4:] == '.png':
-        suffix = ''
-    else:
+    suffix = os.path.splitext(filename)[1]
+
+    if suffix == '':
         suffix = '.png'
         filename = "%s%s" % (filename, suffix)
-    mylog.info("Saving plot %s", fn)
+    mylog.info("Saving plot %s", filename)
     if suffix == ".png":
         canvas = FigureCanvasAgg(fig)
     elif suffix == ".pdf":
         canvas = FigureCanvasPdf(fig)
     elif suffix in (".eps", ".ps"):
-        canvas = FigureCanvasPS
+        canvas = FigureCanvasPS(fig)
     else:
         mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
         canvas = FigureCanvasAgg(fig)
+
     canvas.print_figure(filename)
     return filename
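
The write_bitmap change above replaces a per-channel in-place transpose
with a single swapaxes call; the old loop only worked for square images,
since assigning a (W, H) transpose back into an (H, W) slice fails whenever
H != W.  A quick demonstration:

    import numpy as np

    img = np.zeros((4, 6, 4), dtype="uint8")   # non-square H x W x RGBA
    print(img.swapaxes(0, 1).shape)            # -> (6, 4, 4)
    # img[:, :, 0] = img[:, :, 0].T            # ValueError: shape mismatch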
 


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -1,4 +1,4 @@
-"""
+""" 
 All of the base-level stuff for plotting.
 
 Author: Matthew Turk <matthewturk at gmail.com>
@@ -26,7 +26,7 @@
 from matplotlib import figure
 import shutil
 import tempfile
-import numpy as na
+import numpy as np
 import os
 
 from yt.funcs import *
@@ -71,7 +71,7 @@
 
     def add_image(self, fn, descr):
         self.image_metadata.append(descr)
-        self.images.append((os.path.basename(fn), na.fromfile(fn, dtype='c')))
+        self.images.append((os.path.basename(fn), np.fromfile(fn, dtype='c')))
 
 class PlotCollection(object):
     __id_counter = 0
@@ -122,7 +122,7 @@
         elif center == "center" or center == "c":
             self.c = (pf.domain_right_edge + pf.domain_left_edge)/2.0
         else:
-            self.c = na.array(center, dtype='float64')
+            self.c = np.array(center, dtype='float64')
         mylog.info("Created plot collection with default plot-center = %s",
                     list(self.c))
 
@@ -1504,7 +1504,7 @@
     @rootonly
     def save_book(self, filename, author = None, title = None, keywords = None,
                   subject = None, creator = None, producer = None,
-                  creation_data = None):
+                  creation_date = None):
         r"""Save a multipage PDF of all the current plots, rather than
         individual image files.
 
@@ -1551,15 +1551,21 @@
         >>> dd = pf.h.all_data()
         >>> pc.add_phase_object(dd, ["Density", "Temperature", "CellMassMsun"],
         ...                     weight = None)
-        >>> pc.save_book("my_plots.pdf", author="Matthew Turk", 
+        >>> pc.save_book("my_plots.pdf", author="Yours Truly",
         ...              title="Fun plots")
         """
         from matplotlib.backends.backend_pdf import PdfPages
         outfile = PdfPages(filename)
         for plot in self.plots:
             plot.save_to_pdf(outfile)
-        if info is not None:
-            outfile._file.writeObject(outfile._file.infoObject, info)
+        pdf_keys = ['Title', 'Author', 'Subject', 'Keywords', 'Creator',
+            'Producer', 'CreationDate']
+        pdf_values = [title, author, subject, keywords, creator, producer,
+            creation_date]
+        metadata = outfile.infodict()
+        for key, val in zip(pdf_keys, pdf_values):
+            if isinstance(val, str):
+                metadata[key] = val
         outfile.close()
 
 def wrap_pylab_newplot(func):
@@ -1849,7 +1855,7 @@
         norm = matplotlib.colors.Normalize()
     ax = pylab.figure().gca()
     ax.autoscale(False)
-    axi = ax.imshow(na.random.random((npix, npix)),
+    axi = ax.imshow(np.random.random((npix, npix)),
                     extent = extent, norm = norm,
                     origin = 'lower')
     cb = pylab.colorbar(axi, norm = norm)
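
The save_book fix above writes PDF metadata through PdfPages.infodict(),
which is the supported matplotlib route.  A standalone sketch:

    from matplotlib.backends.backend_pdf import PdfPages
    from matplotlib.figure import Figure

    outfile = PdfPages("toy.pdf")
    metadata = outfile.infodict()
    metadata['Title'] = "Fun plots"
    metadata['Author'] = "Yours Truly"
    fig = Figure()
    fig.add_subplot(111).plot([0, 1], [0, 1])
    outfile.savefig(fig)
    outfile.close()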


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -1,4 +1,4 @@
-"""
+""" 
 Callbacks to add additional functionality on to plots.
 
 Author: Matthew Turk <matthewturk at gmail.com>
@@ -7,9 +7,12 @@
 Affiliation: UC Berkeley
 Author: Stephen Skory <s at skory.us>
 Affiliation: UC San Diego
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: The University of Chicago
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2008-2011 Matthew Turk, JS Oishi, Stephen Skory.  All Rights Reserved.
+  Copyright (C) 2008-2012 Matthew Turk, JS Oishi, Stephen Skory, Anthony Scopatz.  
+  All Rights Reserved.
 
   This file is part of yt.
 
@@ -27,7 +30,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from _mpl_imports import *
@@ -52,25 +55,25 @@
     def convert_to_plot(self, plot, coord, offset = True):
         # coord should be a 2 x ncoord array-like datatype.
         try:
-            ncoord = na.array(coord).shape[1]
+            ncoord = np.array(coord).shape[1]
         except IndexError:
             ncoord = 1
 
         # Convert the data and plot limits to tiled numpy arrays so that
         # convert_to_plot is automatically vectorized.
 
-        x0 = na.tile(plot.xlim[0],ncoord)
-        x1 = na.tile(plot.xlim[1],ncoord)
-        xx0 = na.tile(plot._axes.get_xlim()[0],ncoord)
-        xx1 = na.tile(plot._axes.get_xlim()[1],ncoord)
+        x0 = np.tile(plot.xlim[0],ncoord)
+        x1 = np.tile(plot.xlim[1],ncoord)
+        xx0 = np.tile(plot._axes.get_xlim()[0],ncoord)
+        xx1 = np.tile(plot._axes.get_xlim()[1],ncoord)
         
-        y0 = na.tile(plot.ylim[0],ncoord)
-        y1 = na.tile(plot.ylim[1],ncoord)
-        yy0 = na.tile(plot._axes.get_ylim()[0],ncoord)
-        yy1 = na.tile(plot._axes.get_ylim()[1],ncoord)
+        y0 = np.tile(plot.ylim[0],ncoord)
+        y1 = np.tile(plot.ylim[1],ncoord)
+        yy0 = np.tile(plot._axes.get_ylim()[0],ncoord)
+        yy1 = np.tile(plot._axes.get_ylim()[1],ncoord)
         
         # We need a special case for when we are only given one coordinate.
-        if na.array(coord).shape == (2,):
+        if np.array(coord).shape == (2,):
             return ((coord[0]-x0)/(x1-x0)*(xx1-xx0) + xx0,
                     (coord[1]-y0)/(y1-y0)*(yy1-yy0) + yy0)
         else:
@@ -146,7 +149,9 @@
     def __call__(self, plot):
         # Instantiation of these is cheap
         if plot._type_name == "CuttingPlane":
-            print "WARNING: Magnetic field on Cutting Plane Not implemented."
+            qcb = CuttingQuiverCallback("CuttingPlaneBx",
+                                        "CuttingPlaneBy",
+                                        self.factor)
         else:
             xv = "B%s" % (x_names[plot.data.axis])
             yv = "B%s" % (y_names[plot.data.axis])
@@ -195,10 +200,10 @@
                              plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
                            (x0, x1, y0, y1),).transpose()
-        X,Y = na.meshgrid(na.linspace(xx0,xx1,nx,endpoint=True),
-                          na.linspace(yy0,yy1,ny,endpoint=True))
+        X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
         if self.normalize:
-            nn = na.sqrt(pixX**2 + pixY**2)
+            nn = np.sqrt(pixX**2 + pixY**2)
             pixX /= nn
             pixY /= nn
         plot._axes.quiver(X,Y, pixX, pixY, scale=self.scale, scale_units=self.scale_units)
@@ -210,7 +215,7 @@
     _type_name = "contour"
     def __init__(self, field, ncont=5, factor=4, clim=None,
                  plot_args = None):
-        """
+        """ 
         annotate_contour(self, field, ncont=5, factor=4, take_log=False, clim=None,
                          plot_args = None):
 
@@ -250,12 +255,12 @@
         #appropriate shift to the coppied field.  
 
         #set the cumulative arrays for the periodic shifting.
-        AllX = na.zeros(plot.data["px"].size, dtype='bool')
-        AllY = na.zeros(plot.data["py"].size, dtype='bool')
+        AllX = np.zeros(plot.data["px"].size, dtype='bool')
+        AllY = np.zeros(plot.data["py"].size, dtype='bool')
         XShifted = plot.data["px"].copy()
         YShifted = plot.data["py"].copy()
         dom_x, dom_y = plot._period
-        for shift in na.mgrid[-1:1:3j]:
+        for shift in np.mgrid[-1:1:3j]:
             xlim = ((plot.data["px"] + shift*dom_x >= x0)
                  &  (plot.data["px"] + shift*dom_x <= x1))
             ylim = ((plot.data["py"] + shift*dom_y >= y0)
@@ -269,24 +274,24 @@
         wI = (AllX & AllY)
 
         # We want xi, yi in plot coordinates
-        xi, yi = na.mgrid[xx0:xx1:numPoints_x/(self.factor*1j),\
+        xi, yi = np.mgrid[xx0:xx1:numPoints_x/(self.factor*1j),\
                           yy0:yy1:numPoints_y/(self.factor*1j)]
 
         # This converts XShifted and YShifted into plot coordinates
         x = (XShifted[wI]-x0)*dx + xx0
         y = (YShifted[wI]-y0)*dy + yy0
         z = plot.data[self.field][wI]
-        if plot.pf.field_info[self.field].take_log: z=na.log10(z)
+        if plot.pf.field_info[self.field].take_log: z=np.log10(z)
 
         # Both the input and output from the triangulator are in plot
         # coordinates
         zi = self.triang(x,y).nn_interpolator(z)(xi,yi)
         
         if plot.pf.field_info[self.field].take_log and self.clim is not None: 
-            self.clim = (na.log10(self.clim[0]), na.log10(self.clim[1]))
+            self.clim = (np.log10(self.clim[0]), np.log10(self.clim[1]))
         
         if self.clim is not None: 
-            self.ncont = na.linspace(self.clim[0], self.clim[1], ncont)
+            self.ncont = np.linspace(self.clim[0], self.clim[1], self.ncont)
         
         plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
@@ -296,7 +301,7 @@
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
     def __init__(self, alpha=1.0, min_pix=1, annotate=False, periodic=True):
-        """
+        """ 
         annotate_grids(alpha=1.0, min_pix=1, annotate=False, periodic=True)
 
         Adds grid boundaries to a plot, optionally with *alpha*-blending.
@@ -323,11 +328,11 @@
         py_index = y_dict[plot.data.axis]
         dom = plot.data.pf.domain_right_edge - plot.data.pf.domain_left_edge
         if self.periodic:
-            pxs, pys = na.mgrid[-1:1:3j,-1:1:3j]
+            pxs, pys = np.mgrid[-1:1:3j,-1:1:3j]
         else:
-            pxs, pys = na.mgrid[0:0:1j,0:0:1j]
-        GLE = plot.data.grid_left_edge
-        GRE = plot.data.grid_right_edge
+            pxs, pys = np.mgrid[0:0:1j,0:0:1j]
+        GLE = plot.data.pf.h.grid_left_edge
+        GRE = plot.data.pf.h.grid_right_edge
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
             pxo = px_off * dom[px_index]
             pyo = py_off * dom[py_index]
@@ -335,11 +340,11 @@
             left_edge_py = (GLE[:,py_index]+pyo-y0)*dy
             right_edge_px = (GRE[:,px_index]+pxo-x0)*dx
             right_edge_py = (GRE[:,py_index]+pyo-y0)*dy
-            verts = na.array(
+            verts = np.array(
                 [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
                  (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])
-            visible =  ( right_edge_px - left_edge_px > self.min_pix ) & \
-                       ( right_edge_px - left_edge_px > self.min_pix )
+            visible =  (right_edge_px - left_edge_px > self.min_pix) & \
+                       (right_edge_py - left_edge_py > self.min_pix)
             verts=verts.transpose()[visible,:,:]
             if verts.size == 0: continue
             edgecolors = (0.0,0.0,0.0,self.alpha)
@@ -358,39 +363,30 @@
 
 class StreamlineCallback(PlotCallback):
     _type_name = "streamlines"
-    def __init__(self, field_x, field_y, factor=6.0, nx=16, ny=16,
-                 xstart=(0,1), ystart=(0,1), nsample=256,
-                 start_at_xedge=False, start_at_yedge=False,
-                 plot_args=None):
+    def __init__(self, field_x, field_y, factor = 16,
+                 density = 1, arrowsize = 1, arrowstyle = None,
+                 color = None, normalize = False):
         """
-        annotate_streamlines(field_x, field_y, factor=6.0, nx=16, ny=16,
-                             xstart=(0,1), ystart=(0,1), nsample=256,
-                             start_at_xedge=False, start_at_yedge=False,
-                             plot_args=None):
+        annotate_streamlines(field_x, field_y, factor = 16, density = 1,
+                             arrowsize = 1, arrowstyle = None,
+                             color = None, normalize = False):
 
         Add streamlines to any plot, using the *field_x* and *field_y*
-        from the associated data, using *nx* and *ny* starting points
-        that are bounded by *xstart* and *ystart*.  To begin
-        streamlines from the left edge of the plot, set
-        *start_at_xedge* to True; for the bottom edge, use
-        *start_at_yedge*.  A line with the qmean vector magnitude will
-        cover 1.0/*factor* of the image.
+        from the associated data, sampling one in every *factor* datapoints, as
+        in 'quiver'.  *density* controls how densely the streamlines are drawn,
+        as in matplotlib's streamplot.
         """
         PlotCallback.__init__(self)
         self.field_x = field_x
         self.field_y = field_y
-        self.xstart = xstart
-        self.ystart = ystart
-        self.nsample = nsample
+        self.bv_x = self.bv_y = 0
         self.factor = factor
-        if start_at_xedge:
-            self.data_size = (1,ny)
-        elif start_at_yedge:
-            self.data_size = (nx,1)
-        else:
-            self.data_size = (nx,ny)
-        if plot_args is None: plot_args = {'color':'k', 'linestyle':'-'}
-        self.plot_args = plot_args
+        self.dens = density
+        self.arrowsize = arrowsize
+        if arrowstyle is None : arrowstyle='-|>'
+        self.arrowstyle = arrowstyle
+        if color is None : color = "#000000"
+        self.color = color
+        self.normalize = normalize
         
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -398,40 +394,31 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0]
-        ny = plot.image._A.shape[1]
+        nx = plot.image._A.shape[0] / self.factor
+        ny = plot.image._A.shape[1] / self.factor
         pixX = _MPL.Pixelize(plot.data['px'],
                              plot.data['py'],
                              plot.data['pdx'],
                              plot.data['pdy'],
-                             plot.data[self.field_x],
+                             plot.data[self.field_x] - self.bv_x,
                              int(nx), int(ny),
-                           (x0, x1, y0, y1),)
+                           (x0, x1, y0, y1),).transpose()
         pixY = _MPL.Pixelize(plot.data['px'],
                              plot.data['py'],
                              plot.data['pdx'],
                              plot.data['pdy'],
-                             plot.data[self.field_y],
+                             plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
-                           (x0, x1, y0, y1),)
-        r0 = na.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
-                      self.ystart[0]*ny:self.ystart[1]*ny:self.data_size[1]*1j]
-        lines = na.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
-        lines[0,:,:,:] = r0
-        mag = na.sqrt(pixX**2 + pixY**2)
-        scale = na.sqrt(nx*ny) / (self.factor * mag.mean())
-        dt = 1.0 / (self.nsample-1)
-        for i in range(1,self.nsample):
-            xt = lines[i-1,0,:,:]
-            yt = lines[i-1,1,:,:]
-            ix = na.maximum(na.minimum((xt).astype('int'), nx-1), 0)
-            iy = na.maximum(na.minimum((yt).astype('int'), ny-1), 0)
-            lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
-            lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
-        for i in range(self.data_size[0]):
-            for j in range(self.data_size[1]):
-                plot._axes.plot(lines[:,0,i,j], lines[:,1,i,j],
-                                **self.plot_args)
+                           (x0, x1, y0, y1),).transpose()
+        X,Y = (np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
+        if self.normalize:
+            nn = np.sqrt(pixX**2 + pixY**2)
+            pixX /= nn
+            pixY /= nn
+        plot._axes.streamplot(X,Y, pixX, pixY, density=self.dens,
+                              arrowsize=self.arrowsize, arrowstyle=self.arrowstyle,
+                              color=self.color, norm=self.normalize)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
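
Since the callback now hands the integration off to matplotlib's streamplot, a hedged usage sketch against a bare Axes may help; the toy grid and circulating field below are invented for illustration, and streamplot requires matplotlib 1.2 or newer:

    import numpy as np
    import matplotlib.pyplot as plt

    # Toy regular grid and a simple rotational velocity field.
    X = np.linspace(-1, 1, 64)
    Y = np.linspace(-1, 1, 64)
    XX, YY = np.meshgrid(X, Y)
    U, V = -YY, XX

    fig, ax = plt.subplots()
    # density, arrowsize, arrowstyle and color map directly onto the
    # new StreamlineCallback keywords of the same names.
    ax.streamplot(X, Y, U, V, density=1, arrowsize=1,
                  arrowstyle='-|>', color='#000000')
    fig.savefig('streamlines_sketch.png')
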
@@ -452,6 +439,30 @@
         plot._axes.set_xlabel(self.label)
         plot._axes.set_ylabel(self.label)
 
+class TimeCallback(PlotCallback):
+    _type_name = "time"
+    def __init__(self, format_code='10.7e'):
+        """
+        This annotates the plot with the current simulation time.
+        For now, the time is displayed in seconds.
+        *format_code* can optionally be set, allowing a custom format
+        specification (as accepted by Python's built-in format()) for
+        the time display.
+        """
+        self.format_code = format_code
+        PlotCallback.__init__(self)
+    
+    def __call__(self, plot):
+        current_time = plot.pf.current_time/plot.pf['Time']
+        timestring = format(current_time,self.format_code)
+        base = timestring[:timestring.find('e')]
+        exponent = timestring[timestring.find('e')+1:]
+        if exponent[0] == '+':
+            exponent = exponent[1:]
+        timestring = r'$t\/=\/'+base+r'\times\,10^{'+exponent+r'}\, \rm{s}$'
+        from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
+        at = AnchoredText(timestring, prop=dict(size=12), frameon=True, loc=4)
+        plot._axes.add_artist(at)
+
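
The mantissa/exponent surgery in TimeCallback is easiest to see in isolation; a minimal sketch with an invented time value:

    current_time = 4.5678901e14                  # seconds (made-up value)
    timestring = format(current_time, '10.7e')   # '4.5678901e+14'
    base = timestring[:timestring.find('e')]     # '4.5678901'
    exponent = timestring[timestring.find('e')+1:]
    if exponent[0] == '+':
        exponent = exponent[1:]                  # '14'
    label = r'$t\/=\/' + base + r'\times\,10^{' + exponent + r'}\, \rm{s}$'
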
 def get_smallest_appropriate_unit(v, pf):
     max_nu = 1e30
     good_u = None
@@ -489,18 +500,18 @@
         max_dx = plot.data['pdx'].max()
         w_min_x = 250.0 * min_dx
         w_max_x = 1.0 / self.factor
-        min_exp_x = na.ceil(na.log10(w_min_x*plot.data.pf[self.unit])
-                           /na.log10(self.factor))
-        max_exp_x = na.floor(na.log10(w_max_x*plot.data.pf[self.unit])
-                            /na.log10(self.factor))
+        min_exp_x = np.ceil(np.log10(w_min_x*plot.data.pf[self.unit])
+                           /np.log10(self.factor))
+        max_exp_x = np.floor(np.log10(w_max_x*plot.data.pf[self.unit])
+                            /np.log10(self.factor))
         n_x = max_exp_x - min_exp_x + 1
-        widths = na.logspace(min_exp_x, max_exp_x, num = n_x, base=self.factor)
+        widths = np.logspace(min_exp_x, max_exp_x, num = n_x, base=self.factor)
         widths /= plot.data.pf[self.unit]
         left_edge_px = (center[xi] - widths/2.0 - x0)*dx
         left_edge_py = (center[yi] - widths/2.0 - y0)*dy
         right_edge_px = (center[xi] + widths/2.0 - x0)*dx
         right_edge_py = (center[yi] + widths/2.0 - y0)*dy
-        verts = na.array(
+        verts = np.array(
                 [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
                  (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])
         visible =  ( right_edge_px - left_edge_px > 25 ) & \
@@ -607,7 +618,7 @@
         plot._axes.hold(True)
         nx = plot.image._A.shape[0] / self.factor
         ny = plot.image._A.shape[1] / self.factor
-        indices = na.argsort(plot.data['dx'])[::-1]
+        indices = np.argsort(plot.data['dx'])[::-1]
         pixX = _MPL.CPixelize( plot.data['x'], plot.data['y'], plot.data['z'],
                                plot.data['px'], plot.data['py'],
                                plot.data['pdx'], plot.data['pdy'], plot.data['pdz'],
@@ -622,8 +633,8 @@
                                plot.data[self.field_y],
                                int(nx), int(ny),
                                (x0, x1, y0, y1),).transpose()
-        X = na.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
-        Y = na.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
+        X = np.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
+        Y = np.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
         plot._axes.quiver(X,Y, pixX, pixY)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
@@ -659,7 +670,7 @@
         DomainWidth = DomainRight - DomainLeft
         
         nx, ny = plot.image._A.shape
-        buff = na.zeros((nx,ny),dtype='float64')
+        buff = np.zeros((nx,ny),dtype='float64')
         for i,clump in enumerate(reversed(self.clumps)):
             mylog.debug("Pixelizing contour %s", i)
 
@@ -673,7 +684,7 @@
                                  clump['dx']*0.0+i+1, # inits inside Pixelize
                                  int(nx), int(ny),
                              (x0, x1, y0, y1), 0).transpose()
-            buff = na.maximum(temp, buff)
+            buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
                                      **self.plot_args)
         plot._axes.hold(False)
@@ -695,9 +706,13 @@
         self.plot_args = plot_args
 
     def __call__(self, plot):
+        if len(self.pos) == 3:
+            pos = (self.pos[x_dict[plot.data.axis]],
+                   self.pos[y_dict[plot.data.axis]])
+        else: pos = self.pos
         from matplotlib.patches import Arrow
         # Now convert the pixels to code information
-        x, y = self.convert_to_plot(plot, self.pos)
+        x, y = self.convert_to_plot(plot, pos)
         dx, dy = self.convert_to_plot(plot, self.code_size, False)
         arrow = Arrow(x, y, dx, dy, **self.plot_args)
         plot._axes.add_patch(arrow)
@@ -717,12 +732,13 @@
         self.text_args = text_args
 
     def __call__(self, plot):
-
-
+        if len(self.pos) == 3:
+            pos = (self.pos[x_dict[plot.data.axis]],
+                   self.pos[y_dict[plot.data.axis]])
+        else: pos = self.pos
         width,height = plot.image._A.shape
-        x,y = self.convert_to_plot(plot, self.pos)
-        x,y = x/width,y/height
-
+        x,y = self.convert_to_plot(plot, pos)
+        
         plot._axes.text(x, y, self.text, **self.text_args)
 
 class MarkerAnnotateCallback(PlotCallback):
@@ -817,7 +833,7 @@
             if size < self.min_size or size > self.max_size: continue
             # This could use halo.maximum_radius() instead of width
             if self.width is not None and \
-                na.abs(halo.center_of_mass() - 
+                np.abs(halo.center_of_mass() - 
                        plot.data.center)[plot.data.axis] > \
                    self.width:
                 continue
@@ -1065,8 +1081,8 @@
         LE[zax] = data.center[zax] - self.width*0.5
         RE[zax] = data.center[zax] + self.width*0.5
         if self.region is not None \
-            and na.all(self.region.left_edge <= LE) \
-            and na.all(self.region.right_edge >= RE):
+            and np.all(self.region.left_edge <= LE) \
+            and np.all(self.region.right_edge >= RE):
             return self.region
         self.region = data.pf.h.periodic_region(
             data.center, LE, RE)
@@ -1089,35 +1105,149 @@
 
 class FlashRayDataCallback(PlotCallback):
     _type_name = "flash_ray_data"
-    def __init__(self, pf, cmap_name='bone', sample=None):
+    def __init__(self, cmap_name='bone', sample=None):
+        """ 
+        annotate_flash_ray_data(cmap_name='bone', sample=None)
+
+        Adds ray trace data to the plot.  *cmap_name* is the name of the color map 
+        ('bone', 'jet', 'hot', etc).  *sample* dictates the amount of downsampling 
+        applied so that not every ray is plotted.  This may be None 
+        (plot all rays, default), an integer (step size), or a slice object.
         """
-        annotate_flash_ray_data(pf, cmap_name='bone', sample=None)
-
-        Accepts a *pf* and adds ray trace data to the plot.  *cmap_name* is the
-        name of the color map ('bone', 'jet', 'hot', etc).  *sample* dictates 
-        the amount of down sampling to do to prevent all of the rays from being 
-        plotted.  This may be None (plot all rays, default), an integer (step size), 
-        or a slice object.
-        """
-        self.ray_data = pf._handle["RayData"][:]
-        idx = self.ray_data[:,0].argsort(kind="mergesort")
-        self.ray_data = self.ray_data[idx]
         self.cmap_name = cmap_name
         self.sample = sample if isinstance(sample, slice) else slice(None, None, sample)
 
     def __call__(self, plot):
-        tags = self.ray_data[:,0]
-        coords = self.ray_data[:,1:3]
-        power = self.ray_data[:,4]
+        ray_data = plot.data.pf._handle["RayData"][:]
+        idx = ray_data[:,0].argsort(kind="mergesort")
+        ray_data = ray_data[idx]
+
+        tags = ray_data[:,0]
+        coords = ray_data[:,1:3]
+        power = ray_data[:,4]
         power /= power.max()
         cx, cy = self.convert_to_plot(plot, coords.T)
         coords[:,0], coords[:,1] = cx, cy
-        splitidx = na.argwhere(0 < (tags[1:] - tags[:-1])) + 1
-        coords = na.split(coords, splitidx.flat)[self.sample]
-        power = na.split(power, splitidx.flat)[self.sample]
+        splitidx = np.argwhere(0 < (tags[1:] - tags[:-1])) + 1
+        coords = np.split(coords, splitidx.flat)[self.sample]
+        power = np.split(power, splitidx.flat)[self.sample]
         cmap = matplotlib.cm.get_cmap(self.cmap_name)
 
         plot._axes.hold(True)
-        lc = matplotlib.collections.LineCollection(coords, colors=[cmap(p.max()) for p in power])
+        colors = [cmap(p.max()) for p in power]
+        lc = matplotlib.collections.LineCollection(coords, colors=colors)
         plot._axes.add_collection(lc)
         plot._axes.hold(False)
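
The *sample* normalization means an integer step and an explicit slice index the split rays identically; a quick sketch with invented stand-in data:

    sample = 10   # may also be None (keep everything) or a slice object
    s = sample if isinstance(sample, slice) else slice(None, None, sample)
    rays = range(100)    # stand-in for the list of per-ray coordinate arrays
    kept = rays[s]       # every tenth ray: [0, 10, 20, ..., 90]
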
+
+
+class TimestampCallback(PlotCallback):
+    _type_name = "timestamp"
+    _time_conv = {
+          'as': 1e-18,
+          'attosec': 1e-18,
+          'attosecond': 1e-18,
+          'attoseconds': 1e-18,
+          'fs': 1e-15,
+          'femtosec': 1e-15,
+          'femtosecond': 1e-15,
+          'femtoseconds': 1e-15,
+          'ps': 1e-12,
+          'picosec': 1e-12,
+          'picosecond': 1e-12,
+          'picoseconds': 1e-12,
+          'ns': 1e-9,
+          'nanosec': 1e-9,
+          'nanosecond':1e-9,
+          'nanoseconds' : 1e-9,
+          'us': 1e-6,
+          'microsec': 1e-6,
+          'microsecond': 1e-6,
+          'microseconds': 1e-6,
+          'ms': 1e-3,
+          'millisec': 1e-3,
+          'millisecond': 1e-3,
+          'milliseconds': 1e-3,
+          's': 1.0,
+          'sec': 1.0,
+          'second':1.0,
+          'seconds': 1.0,
+          'm': 60.0,
+          'min': 60.0,
+          'minute': 60.0,
+          'minutes': 60.0,
+          'h': 3600.0,
+          'hour': 3600.0,
+          'hours': 3600.0,
+          'd': 86400.0,
+          'day': 86400.0,
+          'days': 86400.0,
+          'y': 86400.0*365.25,
+          'year': 86400.0*365.25,
+          'years': 86400.0*365.25,
+          'ev': 1e-9 * 7.6e-8 / 6.03,
+          'kev': 1e-12 * 7.6e-8 / 6.03,
+          'mev': 1e-15 * 7.6e-8 / 6.03,
+          }
+
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", **kwargs):
+        """ 
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
+
+        Adds the current time to the plot at point given by *x* and *y*.  If *units* 
+        is given ('s', 'ms', 'ns', etc), it will convert the time to this basis.  If 
+        *units* is None, it will attempt to figure out the correct value by which to 
+        scale.  The *format* keyword is a template string that will be evaluated and 
+        displayed on the plot.  All other *kwargs* will be passed to the text() 
+        method on the plot axes.  See matplotlib's text() functions for more 
+        information.
+        """
+        self.x = x
+        self.y = y
+        self.format = format
+        self.units = units
+        self.kwargs = {'color': 'w'}
+        self.kwargs.update(kwargs)
+
+    def __call__(self, plot):
+        if self.units is None:
+            t = plot.data.pf.current_time
+            scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']
+            self.units = 's'
+            for k in scale_keys:
+                if t < self._time_conv[k]:
+                    break
+                self.units = k
+        t = plot.data.pf.current_time / self._time_conv[self.units.lower()]
+        # use a local for display so self.units stays a valid _time_conv key
+        units = self.units
+        if units == 'us':
+            units = '$\\mu s$'
+        s = self.format.format(time=t, units=units)
+        plot._axes.hold(True)
+        plot._axes.text(self.x, self.y, s, **self.kwargs)
+        plot._axes.hold(False)
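
The automatic unit selection defaults to seconds and then walks the scale keys from attoseconds upward, keeping the largest unit the time still exceeds; a standalone sketch reusing the table values above (pick_units is a hypothetical helper name):

    _time_conv = {'as': 1e-18, 'fs': 1e-15, 'ps': 1e-12,
                  'ns': 1e-9, 'us': 1e-6, 'ms': 1e-3, 's': 1.0}

    def pick_units(t):
        units = 's'
        for k in ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']:
            if t < _time_conv[k]:
                break        # t is below one unit of this scale; stop
            units = k
        return units

    # pick_units(3.2e-7) == 'ns'; pick_units(12.0) == 's'
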
+
+
+class MaterialBoundaryCallback(ContourCallback):
+    _type_name = "material_boundary"
+    def __init__(self, field='targ', ncont=1, factor=4, clim=(0.9, 1.0), **kwargs):
+        """ 
+        annotate_material_boundary(self, field='targ', ncont=1, factor=4, 
+                                   clim=(0.9, 1.0), **kwargs):
+
+        Add the limiting contours of *field* to the plot.  Nominally, *field* is 
+        the target material but may be any other field present in the hierarchy.
+        The number of contours generated is given by *ncont*, *factor* governs 
+        the number of points used in the interpolation, and *clim* gives the 
+        (lower, upper) limits for contouring.  For this to truly be the boundary,
+        *clim* should be close to the edge.  For example, the default is (0.9, 1.0)
+        for 'targ' which is defined on the range [0.0, 1.0].  All other *kwargs* 
+        will be passed to the contour() method on the plot axes.  See matplotlib
+        for more information.
+        """
+        plot_args = {'colors': 'w'}
+        plot_args.update(kwargs)
+        super(MaterialBoundaryCallback, self).__init__(field=field, ncont=ncont, 
+                                                       factor=factor, clim=clim,
+                                                       plot_args=plot_args)
+
+    def __call__(self, plot):
+        super(MaterialBoundaryCallback, self).__call__(plot)


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from _mpl_imports import *
@@ -183,21 +183,21 @@
         if (zmin in (None,'min')) or (zmax in (None,'max')):    
             imbuff = self._axes.images[-1]._A
             if zmin == 'min':
-                zmin = na.nanmin(imbuff[na.nonzero(imbuff)])
+                zmin = np.nanmin(imbuff[np.nonzero(imbuff)])
                 if dex is not None:
-                    zmax = min(zmin*10**(dex),na.nanmax(imbuff))
+                    zmax = min(zmin*10**(dex),np.nanmax(imbuff))
             if zmax == 'max':
-                zmax = na.nanmax(imbuff)
+                zmax = np.nanmax(imbuff)
                 if dex is not None:
-                    zmin = max(zmax/(10**(dex)),na.nanmin(imbuff))
+                    zmin = max(zmax/(10**(dex)),np.nanmin(imbuff))
         if self.colorbar is not None:
             if ticks is not None:
-                ticks = na.sort(ticks)
+                ticks = np.sort(ticks)
                 self.colorbar.locator = matplotlib.ticker.FixedLocator(ticks)
                 self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (x) for x in ticks])
             elif minmaxtick:
                 if self.log_field: 
-                    ticks = na.array(self.colorbar._ticker()[1],dtype='float')
+                    ticks = np.array(self.colorbar._ticker()[1],dtype='float')
                     ticks = [zmin] + ticks.tolist() + [zmax]
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(ticks)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (x) for x in ticks])
@@ -205,11 +205,11 @@
                     mylog.error('Sorry, we do not support minmaxtick for linear fields.  It likely comes close by default')
             elif nticks is not None:
                 if self.log_field:
-                    lin = na.linspace(na.log10(zmin),na.log10(zmax),nticks)
+                    lin = np.linspace(np.log10(zmin),np.log10(zmax),nticks)
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(10**lin)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (10**x) for x in lin])
                 else: 
-                    lin = na.linspace(zmin,zmax,nticks)
+                    lin = np.linspace(zmin,zmax,nticks)
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(lin)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % x for x in lin])
 
@@ -218,7 +218,7 @@
                     self.colorbar.locator = self._old_locator
                 if hasattr(self,'_old_formatter'):
                     self.colorbar.formatter = self._old_formatter
-        self.norm.autoscale(na.array([zmin,zmax], dtype='float64'))
+        self.norm.autoscale(np.array([zmin,zmax], dtype='float64'))
         self.image.changed()
         if self.colorbar is not None:
             mpl_notify(self.image, self.colorbar)
@@ -343,7 +343,7 @@
             self.colorbar.formatter = ttype()
 
     def __init_temp_image(self, setup_colorbar):
-        temparray = na.ones(self.size)
+        temparray = np.ones(self.size)
         self.image = \
             self._axes.imshow(temparray, interpolation='nearest',
                              norm = self.norm, aspect=1.0, picker=True,
@@ -394,20 +394,20 @@
         if self[self.axis_names["Z"]].size == 0:
             raise YTNoDataInObjectError(self.data)
         mylog.debug("Received buffer of min %s and max %s (data: %s %s)",
-                    na.nanmin(buff), na.nanmax(buff),
+                    np.nanmin(buff), np.nanmax(buff),
                     self[self.axis_names["Z"]].min(),
                     self[self.axis_names["Z"]].max())
         if self.log_field:
-            bI = na.where(buff > 0)
+            bI = np.where(buff > 0)
             if len(bI[0]) == 0:
                 newmin = 1e-99
                 newmax = 1e-99
             else:
-                newmin = na.nanmin(buff[bI])
-                newmax = na.nanmax(buff[bI])
+                newmin = np.nanmin(buff[bI])
+                newmax = np.nanmax(buff[bI])
         else:
-            newmin = na.nanmin(buff)
-            newmax = na.nanmax(buff)
+            newmin = np.nanmin(buff)
+            newmax = np.nanmax(buff)
         aspect = (self.ylim[1]-self.ylim[0])/(self.xlim[1]-self.xlim[0])
         if self.image._A.size != buff.size:
             self._axes.clear()
@@ -418,7 +418,7 @@
             self.image.set_data(buff)
         if self._axes.get_aspect() != aspect: self._axes.set_aspect(aspect)
         if self.do_autoscale:
-            self.norm.autoscale(na.array((newmin,newmax), dtype='float64'))
+            self.norm.autoscale(np.array((newmin,newmax), dtype='float64'))
         self._reset_image_parameters()
         self._run_callbacks()
 
@@ -476,8 +476,8 @@
         self._redraw_image()
 
     def autoscale(self):
-        zmin = na.nanmin(self._axes.images[-1]._A)
-        zmax = na.nanmax(self._axes.images[-1]._A)
+        zmin = np.nanmin(self._axes.images[-1]._A)
+        zmax = np.nanmax(self._axes.images[-1]._A)
         self.set_zlim(zmin, zmax)
 
     def switch_y(self, *args, **kwargs):
@@ -558,16 +558,16 @@
         numPoints_y = int(width)
         dx = numPoints_x / (x1-x0)
         dy = numPoints_y / (y1-y0)
-        xlim = na.logical_and(self.data["px"]+2.0*self.data['pdx'] >= x0,
+        xlim = np.logical_and(self.data["px"]+2.0*self.data['pdx'] >= x0,
                               self.data["px"]-2.0*self.data['pdx'] <= x1)
-        ylim = na.logical_and(self.data["py"]+2.0*self.data['pdy'] >= y0,
+        ylim = np.logical_and(self.data["py"]+2.0*self.data['pdy'] >= y0,
                               self.data["py"]-2.0*self.data['pdy'] <= y1)
-        wI = na.where(na.logical_and(xlim,ylim))
-        xi, yi = na.mgrid[0:numPoints_x, 0:numPoints_y]
+        wI = np.where(np.logical_and(xlim,ylim))
+        xi, yi = np.mgrid[0:numPoints_x, 0:numPoints_y]
         x = (self.data["px"][wI]-x0)*dx
         y = (self.data["py"][wI]-y0)*dy
         z = self.data[self.axis_names["Z"]][wI]
-        if self.log_field: z=na.log10(z)
+        if self.log_field: z=np.log10(z)
         buff = de.Triangulation(x,y).nn_interpolator(z)(xi,yi)
         buff = buff.clip(z.min(), z.max())
         if self.log_field: buff = 10**buff
@@ -603,7 +603,7 @@
         else:
             height = width
         self.pix = (width,height)
-        indices = na.argsort(self.data['dx'])[::-1]
+        indices = np.argsort(self.data['dx'])[::-1]
         buff = _MPL.CPixelize( self.data['x'], self.data['y'], self.data['z'],
                                self.data['px'], self.data['py'],
                                self.data['pdx'], self.data['pdy'], self.data['pdz'],
@@ -756,7 +756,7 @@
             func = self._axes.semilogy
         elif self._log_x and self._log_y:
             func = self._axes.loglog
-        indices = na.argsort(self.data[self.fields[0]])
+        indices = np.argsort(self.data[self.fields[0]])
         func(self.data[self.fields[0]][indices],
              self.data[self.fields[1]][indices],
              **self.plot_options)
@@ -823,7 +823,7 @@
             cb(self)
 
     def __init_colorbar(self):
-        temparray = na.ones((self.x_bins.size, self.y_bins.size))
+        temparray = np.ones((self.x_bins.size, self.y_bins.size))
         self.norm = matplotlib.colors.Normalize()
         self.image = self._axes.pcolormesh(self.x_bins, self.y_bins,
                                       temparray, shading='flat',
@@ -858,13 +858,13 @@
         #self._redraw_image()
         if (zmin is None) or (zmax is None):    
             if zmin == 'min':
-                zmin = na.nanmin(self._axes.images[-1]._A)
+                zmin = np.nanmin(self._axes.images[-1]._A)
                 if dex is not None:
-                    zmax = min(zmin*10**(dex),na.nanmax(self._axes.images[-1]._A))
+                    zmax = min(zmin*10**(dex),np.nanmax(self._axes.images[-1]._A))
             if zmax == 'max':
-                zmax = na.nanmax(self._axes.images[-1]._A)
+                zmax = np.nanmax(self._axes.images[-1]._A)
                 if dex is not None:
-                    zmin = max(zmax/(10**(dex)),na.nanmin(self._axes.images[-1]._A))
+                    zmin = max(zmax/(10**(dex)),np.nanmin(self._axes.images[-1]._A))
         self._zlim = (zmin, zmax)
 
     def set_log_field(self, val):
@@ -883,8 +883,8 @@
     def _redraw_image(self):
         vals = self.data[self.fields[2]].transpose()
         used_bin = self.data["UsedBins"].transpose()
-        vmin = na.nanmin(vals[used_bin])
-        vmax = na.nanmax(vals[used_bin])
+        vmin = np.nanmin(vals[used_bin])
+        vmax = np.nanmax(vals[used_bin])
         if self._zlim is not None: vmin, vmax = self._zlim
         if self._log_z:
             # We want smallest non-zero vmin
@@ -892,10 +892,10 @@
                                                 clip=False)
             self.ticker = matplotlib.ticker.LogLocator()
             if self._zlim is None:
-                vI = na.where(vals > 0)
+                vI = np.where(vals > 0)
                 vmin = vals[vI].min()
                 vmax = vals[vI].max()
-            self.norm.autoscale(na.array((vmin,vmax), dtype='float64'))
+            self.norm.autoscale(np.array((vmin,vmax), dtype='float64'))
         else:
             self.norm=matplotlib.colors.Normalize(vmin=vmin, vmax=vmax,
                                                   clip=False)
@@ -979,7 +979,7 @@
             func = self._axes.semilogy
         elif self._log_x and self._log_y:
             func = self._axes.loglog
-        indices = na.argsort(self.data[self.fields[0]])
+        indices = np.argsort(self.data[self.fields[0]])
         func(self.data[self.fields[0]][indices],
              self.data[self.fields[1]][indices],
              **self.plot_options)


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -26,19 +26,25 @@
 """
 import base64
 import matplotlib.figure
+from matplotlib.mathtext import MathTextParser
+try:
+    from matplotlib.pyparsing import ParseFatalException
+except ImportError:
+    from pyparsing import ParseFatalException
 import cStringIO
 import types
 import __builtin__
 from functools import wraps
 
-import numpy as na
+import numpy as np
 from ._mpl_imports import *
 from .color_maps import yt_colormaps, is_colormap
 from .image_writer import \
     write_image, apply_colormap
 from .fixed_resolution import \
     FixedResolutionBuffer, \
-    ObliqueFixedResolutionBuffer
+    ObliqueFixedResolutionBuffer, \
+    OffAxisProjectionFixedResolutionBuffer
 from .plot_modifications import get_smallest_appropriate_unit, \
     callback_registry
 from .tick_locators import LogLocator, LinearLocator
@@ -101,7 +107,10 @@
         self.pf = frb.pf
         self.xlim = viewer.xlim
         self.ylim = viewer.ylim
-        self._type_name = ''
+        if 'Cutting' in self.data.__class__.__name__:
+            self._type_name = "CuttingPlane"
+        else:
+            self._type_name = ''
 
 class FieldTransform(object):
     def __init__(self, name, func, locator):
@@ -120,7 +129,7 @@
             ticks = []
         return ticks
 
-log_transform = FieldTransform('log10', na.log10, LogLocator())
+log_transform = FieldTransform('log10', np.log10, LogLocator())
 linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
 
 def GetBoundsAndCenter(axis, center, width, pf, unit='1'):
@@ -152,7 +161,7 @@
               center[y_dict[axis]]+width[1]/2]
     return (bounds,center)
 
-def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1'):
+def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1',depth=None):
     if width == None:
         width = (pf.domain_width.min(),
                  pf.domain_width.min())
@@ -162,7 +171,14 @@
     if not iterable(width):
         width = (width, width)
     Wx, Wy = width
-    width = na.array((Wx/pf[unit], Wy/pf[unit]))
+    width = np.array((Wx/pf[unit], Wy/pf[unit]))
+    if depth != None:
+        if iterable(depth) and isinstance(depth[1],str):
+            d,unit = depth
+            depth = d/pf[unit]
+        elif iterable(depth):
+            raise RuntimeError("Depth must be a float or a (width,\"unit\") tuple")
+        width = np.append(width,depth)
     if isinstance(center,str):
         if center.lower() == 'm' or center.lower() == 'max':
             v, center = pf.h.find_max("Density")
@@ -171,16 +187,19 @@
         else:
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
 
-    # Transforming to the cutting plane coordinate system
-    center = na.array(center)
-    center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
-    (normal,perp1,perp2) = ortho_find(normal)
-    mat = na.transpose(na.column_stack((perp1,perp2,normal)))
-    center = na.dot(mat,center)
-    width = width/pf.domain_width.min()
+    if width.shape == (2,):
+        # Transforming to the cutting plane coordinate system
+        center = np.array(center)
+        center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
+        (normal,perp1,perp2) = ortho_find(normal)
+        mat = np.transpose(np.column_stack((perp1,perp2,normal)))
+        center = np.dot(mat,center)
+    
+        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
+    else:
+        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2, -width[2]/2, width[2]/2]
 
-    bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
-    
     return (bounds,center)
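
The new *depth* keyword accepts a bare float in code units or a (value, unit) tuple; a hedged sketch of just that normalization, where normalize_depth is a hypothetical helper and pf_units stands in for the parameter file's pf[unit] lookup:

    import numpy as np

    def normalize_depth(depth, pf_units):
        # pf_units: dict of unit name -> conversion factor, a stand-in
        # for indexing the parameter file in the changeset.
        if np.iterable(depth) and isinstance(depth[1], str):
            d, unit = depth
            return d / pf_units[unit]
        elif np.iterable(depth):
            raise RuntimeError('Depth must be a float or a (width, "unit") tuple')
        return depth

    width = np.array([0.2, 0.3])
    width = np.append(width, normalize_depth((500.0, 'kpc'), {'kpc': 1e4}))
    # width is now a 3-vector; downstream, bounds gain a (-d/2, d/2) z pair
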
 
 class PlotWindow(object):
@@ -189,8 +208,8 @@
     _contour_info = None
     _vector_info = None
     _frb = None
-    def __init__(self, data_source, bounds, buff_size=(800,800), antialias = True, 
-                 periodic = True, origin='center-window', oblique=False):
+    def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
+                 periodic=True, origin='center-window', oblique=False):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -242,20 +261,14 @@
         old_fields = None
         if self._frb is not None:
             old_fields = self._frb.keys()
-        try:
+        if hasattr(self,'zlim'):
+            bounds = self.xlim+self.ylim+self.zlim
+        else:
             bounds = self.xlim+self.ylim
-            if self.oblique == False:
-                self._frb = FixedResolutionBuffer(self.data_source, 
-                                                  bounds, self.buff_size, 
-                                                  self.antialias, 
-                                                  periodic=self._periodic)
-            else:
-                self._frb = ObliqueFixedResolutionBuffer(self.data_source, 
-                                                         bounds, self.buff_size, 
-                                                         self.antialias, 
-                                                         periodic=self._periodic)
-        except:
-            raise RuntimeError("Failed to repixelize.")
+        self._frb = self._frb_generator(self.data_source,
+                                        bounds, self.buff_size,
+                                        self.antialias,
+                                        periodic=self._periodic)
         if old_fields is None:
             self._frb._get_data_source_fields()
         else:
@@ -296,6 +309,7 @@
         nWx, nWy = Wx/factor, Wy/factor
         self.xlim = (centerx - nWx*0.5, centerx + nWx*0.5)
         self.ylim = (centery - nWy*0.5, centery + nWy*0.5)
+                    
 
     @invalidate_data
     def pan(self, deltas):
@@ -342,42 +356,75 @@
             dy = bounds[3] - bounds[2]
             self.xlim = (self.center[0] - dx/2., self.center[0] + dx/2.)
             self.ylim = (self.center[1] - dy/2., self.center[1] + dy/2.)
-            mylog.info("xlim = %f %f" %self.xlim)
-            mylog.info("ylim = %f %f" %self.ylim)
         else:
-            self.xlim = bounds[0:2]
-            self.ylim = bounds[2:]
-            
+            self.xlim = tuple(bounds[0:2])
+            self.ylim = tuple(bounds[2:4])
+            if len(bounds) == 6:
+                self.zlim = tuple(bounds[4:6])
+        mylog.info("xlim = %f %f" %self.xlim)
+        mylog.info("ylim = %f %f" %self.ylim)
+        if hasattr(self,'zlim'):
+            mylog.info("zlim = %f %f" %self.zlim)
+
     @invalidate_data
     def set_width(self, width, unit = '1'):
         """set the width of the plot window
 
         parameters
         ----------
-        width : float, array of floats, or (float, unit) tuple.
-            the width of the image.
+        width : float, array of floats, (float, unit) tuple, or array of (float, unit) tuples.
+             Width can have four different formats to support windows with variable 
+             x and y widths.  They are:
+             
+             ==================================     =======================
+             format                                 example                
+             ==================================     =======================
+             (float, string)                        (10,'kpc')
+             ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
+             float                                  0.2
+             (float, float)                         (0.2, 0.3)
+             ==================================     =======================
+             
+             For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs 
+             wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
+             that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
+             the y axis.  In the other two examples, code units are assumed; for example,
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.
         unit : str
             the unit the width has been specified in.
             defaults to code units.  If width is a tuple this 
             argument is ignored
 
         """
-        if iterable(width) and isinstance(width[1],str):
-            unit = width[1]
-            width = width[0]
-        elif not iterable(width):
-            width = (width,width)
+        if iterable(width): 
+            if isinstance(width[1],str):
+                w, unit = width
+                width = (w, w)
+            elif isinstance(width[1], tuple):
+                wx,unitx = width[0]
+                wy,unity = width[1]
+                width = (wx/self.pf[unitx],wy/self.pf[unity])
+        else:
+            width = (width, width)
         Wx, Wy = width
         width = (Wx,Wy)
         width = [w / self.pf[unit] for w in width]
 
-        centerx = (self.xlim[1] + self.xlim[0])/2 
-        centery = (self.ylim[1] + self.ylim[0])/2 
+        centerx = (self.xlim[1] + self.xlim[0])/2.
+        centery = (self.ylim[1] + self.ylim[0])/2. 
+        
         self.xlim = (centerx - width[0]/2.,
                      centerx + width[0]/2.)
         self.ylim = (centery - width[1]/2.,
                      centery + width[1]/2.)
         
+        if hasattr(self,'zlim'):
+            centerz = (self.zlim[1] + self.zlim[0])/2.
+            mw = max(width)
+            self.zlim = (centerz - mw/2.,
+                         centerz + mw/2.)
+        
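
The four accepted width spellings all normalize to a plain (x, y) pair in code units before the limits are recentered; a sketch of the dispatch, where normalize_width is a hypothetical helper and pf_units again stands in for pf[unit]:

    def normalize_width(width, pf_units, unit='1'):
        # pf_units maps unit names to conversion factors; pf_units['1'] == 1.0.
        if isinstance(width, tuple) and isinstance(width[1], str):
            w, unit = width                        # (10, 'kpc')
            width = (w, w)
        elif isinstance(width, tuple) and isinstance(width[1], tuple):
            (wx, ux), (wy, uy) = width             # ((10, 'kpc'), (15, 'kpc'))
            return (wx / pf_units[ux], wy / pf_units[uy])
        elif not isinstance(width, tuple):
            width = (width, width)                 # bare float, code units
        return (width[0] / pf_units[unit], width[1] / pf_units[unit])

    units = {'1': 1.0, 'kpc': 1e4}   # hypothetical conversion factors
    # normalize_width((10, 'kpc'), units) == (0.001, 0.001)
    # normalize_width(0.2, units)         == (0.2, 0.2)
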
     @invalidate_data
     def set_center(self, new_center, unit = '1'):
         """Sets a new center for the plot window
@@ -434,10 +481,11 @@
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
+        self._unit = None
+        self._callbacks = []
+        self._field_transform = {}
         self._colormaps = defaultdict(lambda: 'algae')
         self.setup_callbacks()
-        self._callbacks = []
-        self._field_transform = {}
         for field in self._frb.data.keys():
             finfo = self.data_source._get_field_info(*field)
             if finfo.take_log:
@@ -555,13 +603,52 @@
             callback.__doc__ = CallbackMaker.__init__.__doc__
             self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
 
+    @invalidate_plot
+    def set_axes_unit(self, unit_name):
+        r"""Set the unit for display on the x and y axes of the image.
+
+        Parameters
+        ----------
+        unit_name : string
+            A unit, available for conversion in the parameter file, that the
+            image extents will be displayed in.  If set to None, any previous
+            units are cleared and an appropriate default is chosen.
+            If unit_name is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
+
+        Raises
+        ------
+        YTUnitNotRecognized
+            If the unit is not known, this will be raised.
+
+        Examples
+        --------
+
+        >>> p = ProjectionPlot(pf, "y", "Density")
+        >>> p.show()
+        >>> p.set_axes_unit("kpc")
+        >>> p.show()
+        >>> p.set_axes_unit(None)
+        >>> p.show()
+        """
+        # the unit may live in conversion_factors or units; a KeyError means neither
+        try:
+            self.pf[unit_name]
+        except KeyError: 
+            if unit_name is not None:
+                raise YTUnitNotRecognized(unit_name)
+        self._unit = unit_name
+
     def get_metadata(self, field, strip_mathml = True, return_string = True):
         fval = self._frb[field]
         mi = fval.min()
         ma = fval.max()
         x_width = self.xlim[1] - self.xlim[0]
         y_width = self.ylim[1] - self.ylim[0]
-        unit = get_smallest_appropriate_unit(x_width, self.pf)
+        if self._unit is None:
+            unit = get_smallest_appropriate_unit(x_width, self.pf)
+        else:
+            unit = self._unit
         units = self.get_field_units(field, strip_mathml)
         center = getattr(self._frb.data_source, "center", None)
         if center is None or self._frb.axis == 4:
@@ -611,6 +698,71 @@
 
     """
     _current_field = None
+    _frb_generator = None
+    _plot_type = None
+
+    def __init__(self, *args, **kwargs):
+        if self._frb_generator == None:
+            self._frb_generator = kwargs.pop("frb_generator")
+        if self._plot_type == None:
+            self._plot_type = kwargs.pop("plot_type")
+        PWViewer.__init__(self, *args, **kwargs)
+
+    def _setup_origin(self):
+        origin = self.origin
+        axis_index = self.data_source.axis
+        if isinstance(origin, basestring):
+            origin = tuple(origin.split('-'))[:3]
+        if 1 == len(origin):
+            origin = ('lower', 'left') + origin
+        elif 2 == len(origin) and origin[0] in set(['left','right','center']):
+            o0map = {'left': 'lower', 'right': 'upper', 'center': 'center'}
+            origin = (o0map[origin[0]],) + origin
+        elif 2 == len(origin) and origin[0] in set(['lower','upper','center']):
+            origin = (origin[0], 'center', origin[-1])
+        assert origin[-1] in ['window', 'domain']
+
+        if origin[2] == 'window':
+            xllim, xrlim = self.xlim
+            yllim, yrlim = self.ylim
+        elif origin[2] == 'domain':
+            xllim = self.pf.domain_left_edge[x_dict[axis_index]]
+            xrlim = self.pf.domain_right_edge[x_dict[axis_index]]
+            yllim = self.pf.domain_left_edge[y_dict[axis_index]]
+            yrlim = self.pf.domain_right_edge[y_dict[axis_index]]
+        else:
+            mylog.warn("origin = {0}".format(origin))
+            msg = ('origin keyword "{0}" not recognized, must declare "domain" '
+                   'or "window" as the last term in origin.').format(self.origin)
+            raise RuntimeError(msg)
+
+        if origin[0] == 'lower':
+            yc = yllim
+        elif origin[0] == 'upper':
+            yc = yrlim
+        elif origin[0] == 'center':
+            yc = (yllim + yrlim)/2.0
+        else:
+            mylog.warn("origin = {0}".format(origin))
+            msg = ('origin keyword "{0}" not recognized, must declare "lower" '
+                   '"upper" or "center" as the first term in origin.')
+            msg = msg.format(self.origin)
+            raise RuntimeError(msg)
+
+        if origin[1] == 'left':
+            xc = xllim
+        elif origin[1] == 'right':
+            xc = xrlim
+        elif origin[1] == 'center':
+            xc = (xllim + xrlim)/2.0
+        else:
+            mylog.warn("origin = {0}".format(origin))
+            msg = ('origin keyword "{0}" not recognized, must declare "left" '
+                   '"right" or "center" as the second term in origin.')
+            msg = msg.format(self.origin)
+            raise RuntimeError(msg)
+
+        return xc, yc
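
The string form of *origin* is padded out to a (yloc, xloc, space) triple before any coordinates are looked up; a minimal sketch of just that normalization (normalize_origin is a hypothetical helper; str here where the Python 2 original tests basestring):

    def normalize_origin(origin):
        # Mirrors the padding rules in _setup_origin above.
        if isinstance(origin, str):
            origin = tuple(origin.split('-'))[:3]
        if len(origin) == 1:
            origin = ('lower', 'left') + origin
        elif len(origin) == 2 and origin[0] in ('left', 'right', 'center'):
            o0map = {'left': 'lower', 'right': 'upper', 'center': 'center'}
            origin = (o0map[origin[0]],) + origin
        elif len(origin) == 2 and origin[0] in ('lower', 'upper', 'center'):
            origin = (origin[0], 'center', origin[-1])
        assert origin[-1] in ('window', 'domain')
        return origin

    # normalize_origin('center-window') == ('center', 'center', 'window')
    # normalize_origin('left-domain')   == ('lower', 'left', 'domain')
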
 
     def _setup_plots(self):
         if self._current_field is not None:
@@ -622,20 +774,7 @@
             md = self.get_metadata(f, strip_mathml = False, return_string = False)
             axis_index = self.data_source.axis
 
-            if self.origin == 'center-window':
-                xc = (self.xlim[0]+self.xlim[1])/2
-                yc = (self.ylim[0]+self.ylim[1])/2
-            elif self.origin == 'center-domain':
-                xc = (self.pf.domain_left_edge[x_dict[axis_index]]+
-                      self.pf.domain_right_edge[x_dict[axis_index]])/2
-                yc = (self.pf.domain_left_edge[y_dict[axis_index]]+
-                      self.pf.domain_right_edge[y_dict[axis_index]])/2
-            elif self.origin == 'left-domain':
-                xc = self.pf.domain_left_edge[x_dict[axis_index]]
-                yc = self.pf.domain_left_edge[y_dict[axis_index]]
-            else:
-                raise RuntimeError(
-                    'origin keyword: \"%(k)s\" not recognized' % {'k': self.origin})
+            xc, yc = self._setup_origin()
 
             extent = [self.xlim[i] - xc for i in (0,1)]
             extent.extend([self.ylim[i] - yc for i in (0,1)])
@@ -650,33 +789,55 @@
 
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
+            norm_size = 10.0
+            cbar_frac = 0.0
             if aspect > 1.0:
-                size = (10.0, 10.0/aspect)
+                size = (norm_size*(1.+cbar_frac), norm_size/aspect)
             else:
-                size = (10.0*aspect, 10.0)
+                size = (aspect*norm_size*(1.+cbar_frac), norm_size)
 
             self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
             self.plots[f].cb = self.plots[f].figure.colorbar(
                 self.plots[f].image, cax = self.plots[f].cax)
 
+            if not md['unit'] in ['1', 'u', 'unitary']:
+                axes_unit_label = '\/\/('+md['unit']+')'
+            else:
+                axes_unit_label = ''
+
             if self.oblique == False:
-                labels = [r'$\rm{'+axis_labels[axis_index][i].encode('string-escape')+
-                          r'\/\/('+md['unit'].encode('string-escape')+r')}$' for i in (0,1)]
+                labels = [r'$\rm{'+axis_labels[axis_index][i]+
+                        axes_unit_label + r'}$' for i in (0,1)]
             else:
-                labels = [r'$\rm{Image\/x}\/\/\rm{('+md['unit'].encode('string-escape')+r')}$',
-                          r'$\rm{Image\/y}\/\/\rm{('+md['unit'].encode('string-escape')+r')}$']
-                
+                labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
+                          r'$\rm{Image\/y'+axes_unit_label+'}$']
+
             self.plots[f].axes.set_xlabel(labels[0])
             self.plots[f].axes.set_ylabel(labels[1])
 
             ftype, fname = f
             field_name = self.data_source._get_field_info(ftype, fname).display_name
-            if field_name is None: field_name = fname
-            if md['units'] == None or md['units'] == '':
-                label = r'$\rm{'+field_name.encode('string-escape')+r'}$'
+
+            if field_name is None:
+                field_name = r'$\rm{'+fname+r'}$'
+            elif field_name.find('$') == -1:
+                field_name = r'$\rm{'+field_name+r'}$'
+            
+            parser = MathTextParser('Agg')
+            try:
+                parser.parse(field_name)
+            except ParseFatalException, err:
+                raise YTCannotParseFieldDisplayName(fname,field_name,str(err))
+
+            if md['units'] is None or md['units'] == '':
+                label = field_name
             else:
-                label = r'$\rm{'+field_name.encode('string-escape')+r'}\/\/('+md['units']+r')$'
+                try:
+                    parser.parse(r'$'+md['units']+r'$')
+                except ParseFatalException, err:
+                    raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
+                label = field_name+r'$\/\/('+md['units']+r')$'
 
             self.plots[f].cb.set_label(label)
 
@@ -725,7 +886,7 @@
                 raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
             self.plots[field].image.set_cmap(cmap)
 
-    def save(self,name=None):
+    def save(self, name=None, mpl_kwargs=None):
         """saves the plot to disk.
 
         Parameters
@@ -733,24 +894,31 @@
         name : string
            the base of the filename.  If not set the filename of 
            the parameter file is used
+        mpl_kwargs : dict
+           A dict of keyword arguments to be passed to matplotlib.
+           
+        >>> slc.save(mpl_kwargs={'bbox_inches':'tight'})
 
         """
+        names = []
+        if mpl_kwargs is None: mpl_kwargs = {}
         if name == None:
             name = str(self.pf)
-        elif name.endswith('.png'):
-            return v.save(name)
+        suffix = os.path.splitext(name)[1]
+        if suffix != '':
+            for k, v in self.plots.iteritems():
+                names.append(v.save(name,mpl_kwargs))
+            return names
         axis = axis_names[self.data_source.axis]
         weight = None
-        if 'Slice' in self.data_source.__class__.__name__:
-            type = 'Slice'
-        if 'Proj' in self.data_source.__class__.__name__:
-            type = 'Projection'
+        type = self._plot_type
+        if type in ['Projection','OffAxisProjection']:
             weight = self.data_source.weight_field
         if 'Cutting' in self.data_source.__class__.__name__:
             type = 'OffAxisSlice'
-        names = []
         for k, v in self.plots.iteritems():
-            if isinstance(k, types.TupleType): k = k[1]
+            if isinstance(k, types.TupleType):
+                k = k[1]
             if axis:
                 n = "%s_%s_%s_%s" % (name, type, axis, k)
             else:
@@ -758,15 +926,19 @@
                 n = "%s_%s_%s" % (name, type, k)
             if weight:
                 n += "_%s" % (weight)
-            names.append(v.save(n))
+            names.append(v.save(n, mpl_kwargs))
         return names
 
     def _send_zmq(self):
-        from IPython.zmq.pylab.backend_inline import \
-                    send_figure
+        try:
+            # pre-IPython v0.14
+            from IPython.zmq.pylab.backend_inline import send_figure as display
+        except ImportError:
+            # IPython v0.14+ 
+            from IPython.core.display import display
         for k, v in sorted(self.plots.iteritems()):
             canvas = FigureCanvasAgg(v.figure)
-            send_figure(v.figure)
+            display(v.figure)
 
     def show(self):
         r"""This will send any existing plots to the IPython notebook.
@@ -791,8 +963,20 @@
         else:
             raise YTNotInsideNotebook
 
+    def show_or_save(self, name=None):
+        """Will attempt to show the plot in in an IPython notebook.  Failing that, the 
+        plot will be saved to disk."""
+        try:
+            self.show()
+        except YTNotInsideNotebook:
+            self.save(name=name)
+
 class SlicePlot(PWViewerMPL):
-    def __init__(self, pf, axis, fields, center='c', width=None, origin='center-window'):
+    _plot_type = 'Slice'
+    _frb_generator = FixedResolutionBuffer
+
+    def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
+                 origin='center-window'):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -812,11 +996,12 @@
              or the axis name itself
         fields : string
              The name of the field(s) to be plotted.
-        center : two or three-element vector of sequence floats, 'c', or 'center'
+        center : two- or three-element sequence of floats, 'c', 'center', or 'max'
              The coordinate of the center of the image.  If left blank,
              the image centers on the location of the maximum density
              cell.  If set to 'c' or 'center', the plot is centered on
-             the middle of the domain.
+             the middle of the domain.  If set to 'max', it will be centered
+             on the point of highest density.
         width : tuple or a float.
              Width can have four different formats to support windows with variable 
              x and y widths.  They are:
@@ -836,12 +1021,37 @@
              the y axis.  In the other two examples, code units are assumed; for example,
              (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
              in code units.  
-        origin : string
-             The location of the origin of the plot coordinate system.
-             Currently, can be set to three options: 'left-domain', corresponding
-             to the bottom-left hand corner of the simulation domain, 'center-domain',
-             corresponding the center of the simulation domain, or 'center-window' for 
-             the center of the plot window.
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
+        origin : string or length 1, 2, or 3 sequence of strings
+             The location of the origin of the plot coordinate system.  This is 
+             represented by '-' separated string or a tuple of strings.  In the
+             first index the y-location is given by 'lower', 'upper', or 'center'.
+             The second index is the x-location, given as 'left', 'right', or 
+             'center'.  Finally, whether the origin is applied in 'domain' space
+             or plot 'window' space is given. For example, 'upper-right-domain'
+             and ['upper', 'right', 'domain'] both place the origin in the upper
+             right hand corner of domain space. If x or y are not given, a value is 
+             inferred.  For instance, 'left-domain' corresponds to the lower-left 
+             hand corner of the simulation domain, 'center-domain' corresponds to the 
+             center of the simulation domain, or 'center-window' for the center of 
+             the plot window.  Further examples:
+
+             ==================================     ============================
+             format                                 example                
+             ==================================     ============================
+             '{space}'                              'domain'
+             '{xloc}-{space}'                       'left-window'
+             '{yloc}-{space}'                       'upper-domain'
+             '{yloc}-{xloc}-{space}'                'lower-right-window'
+             ('{space}',)                           ('window',)
+             ('{xloc}', '{space}')                  ('right', 'domain')
+             ('{yloc}', '{space}')                  ('lower', 'window')
+             ('{yloc}', '{xloc}', '{space}')        ('lower', 'right', 'window')
+             ==================================     ============================
              
         Examples
         --------
@@ -858,9 +1068,13 @@
         slc = pf.h.slice(axis, center[axis])
         slc.get_data(fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
+        self.set_axes_unit(axes_unit)
 
 class ProjectionPlot(PWViewerMPL):
-    def __init__(self, pf, axis, fields, center='c', width=None,
+    _plot_type = 'Projection'
+    _frb_generator = FixedResolutionBuffer
+
+    def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  weight_field=None, max_level=None, origin='center-window'):
         r"""Creates a projection plot from a parameter file
         
@@ -881,11 +1095,12 @@
              or the axis name itself
         fields : string
             The name of the field(s) to be plotted.
-        center : A two or three-element vector of sequence floats, 'c', or 'center'
-            The coordinate of the center of the image.  If left blanck,
-            the image centers on the location of the maximum density
-            cell.  If set to 'c' or 'center', the plot is centered on
-            the middle of the domain.
+        center : two- or three-element sequence of floats, 'c', 'center', or 'max'
+             The coordinate of the center of the image.  If left blank,
+             the image centers on the location of the maximum density
+             cell.  If set to 'c' or 'center', the plot is centered on
+             the middle of the domain.  If set to 'max', it will be centered
+             on the point of highest density.
         width : tuple or a float.
              Width can have four different formats to support windows with variable 
              x and y widths.  They are:
@@ -905,12 +1120,38 @@
              the y axis.  In the other two examples, code units are assumed, for example
             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
              in code units.
-        origin : A string
-            The location of the origin of the plot coordinate system.
-            Currently, can be set to three options: 'left-domain', corresponding
-            to the bottom-left hand corner of the simulation domain, 'center-domain',
-            corresponding the center of the simulation domain, or 'center-window' for 
-            the center of the plot window.
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', the units will not be
+            displayed and only the axis names will be shown.
+        origin : string or length 1, 2, or 3 sequence of strings
+             The location of the origin of the plot coordinate system.  This is 
+             represented by a '-'-separated string or a tuple of strings.  In the
+             first index the y-location is given by 'lower', 'upper', or 'center'.
+             The second index is the x-location, given as 'left', 'right', or 
+             'center'.  Finally, whether the origin is applied in 'domain' space
+             or plot 'window' space is given.  For example, 'upper-right-domain'
+             and ['upper', 'right', 'domain'] both place the origin in the upper
+             right hand corner of domain space.  If x or y is not given, a value is 
+             inferred.  For instance, 'left-domain' corresponds to the lower-left 
+             hand corner of the simulation domain, 'center-domain' corresponds to the 
+             center of the simulation domain, and 'center-window' to the center of 
+             the plot window.  Further examples:
+
+             ==================================     ============================
+             format                                 example
+             ==================================     ============================ 
+             '{space}'                              'domain'
+             '{xloc}-{space}'                       'left-window'
+             '{yloc}-{space}'                       'upper-domain'
+             '{yloc}-{xloc}-{space}'                'lower-right-window'
+             ('{space}',)                           ('window',)
+             ('{xloc}', '{space}')                  ('right', 'domain')
+             ('{yloc}', '{space}')                  ('lower', 'window')
+             ('{yloc}', '{xloc}', '{space}')        ('lower', 'right', 'window')
+             ==================================     ============================
+             
         weight_field : string
             The name of the weighting field.  Set to None for no weight.
         max_level: int
@@ -930,9 +1171,14 @@
         (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
         proj = pf.h.proj(fields, axis, weight_field=weight_field, center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
+        self.set_axes_unit(axes_unit)
 
 class OffAxisSlicePlot(PWViewerMPL):
-    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), north_vector=None):
+    _plot_type = 'OffAxisSlice'
+    _frb_generator = ObliqueFixedResolutionBuffer
+
+    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
+                 axes_unit=None, north_vector=None):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -960,6 +1206,11 @@
             A tuple containing the width of image and the string key of
             the unit: (width, 'unit').  If set to a float, code units
             are assumed
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', the units will not be
+            displayed and only the axis names will be shown.
         north_vector : a sequence of floats
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
@@ -971,6 +1222,96 @@
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
+        self.set_axes_unit(axes_unit)
+
+class OffAxisProjectionDummyDataSource(object):
+    _type_name = 'proj'
+    proj_style = 'integrate'
+    _key_fields = []
+    def __init__(self, center, pf, normal_vector, width, fields, 
+                 interpolated, resolution = (800,800), weight=None,  
+                 volume=None, no_ghost=False, le=None, re=None, 
+                 north_vector=None):
+        self.center = center
+        self.pf = pf
+        self.axis = 4 # always true for oblique data objects
+        self.normal_vector = normal_vector
+        self.width = width
+        self.fields = fields
+        self.interpolated = interpolated
+        self.resolution = resolution
+        self.weight_field = weight
+        self.volume = volume
+        self.no_ghost = no_ghost
+        self.le = le
+        self.re = re
+        self.north_vector = north_vector
+
+class OffAxisProjectionPlot(PWViewerMPL):
+    _plot_type = 'OffAxisProjection'
+    _frb_generator = OffAxisProjectionFixedResolutionBuffer
+
+    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
+                 depth=(1,'unitary'), axes_unit=None, weight_field=None, 
+                 max_level=None, north_vector=None, volume=None, no_ghost=False, 
+                 le=None, re=None, interpolated=False):
+        r"""Creates an off axis projection plot from a parameter file
+
+        Given a pf object, a normal vector to project along, and
+        a field name string, this will return a PWViewerMPL object
+        containing the plot.
+        
+        The plot can be updated using one of the many helper functions
+        defined in PlotWindow.
+
+        Parameters
+        ----------
+        pf : :class:`yt.data_objects.api.StaticOutput`
+            This is the parameter file object corresponding to the
+            simulation output to be plotted.
+        normal : a sequence of floats
+            The vector normal to the slicing plane.
+        fields : string
+            The name of the field(s) to be plotted.
+        center : A two- or three-element sequence of floats, 'c', or 'center'
+            The coordinate of the center of the image.  If left blank,
+            the image centers on the location of the maximum density
+            cell.  If set to 'c' or 'center', the plot is centered on
+            the middle of the domain.
+        width : A tuple or a float
+            A tuple containing the width of image and the string key of
+            the unit: (width, 'unit').  If set to a float, code units
+            are assumed
+        depth : A tuple or a float
+            A tuple containing the depth to project through and the string
+            key of the unit: (depth, 'unit').  If set to a float, code units
+            are assumed
+        weight_field : string
+            The name of the weighting field.  Set to None for no weight.
+        max_level: int
+            The maximum level to project to.
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', the units will not be
+            displayed and only the axis names will be shown.
+        north_vector : a sequence of floats
+            A vector defining the 'up' direction in the plot.  This
+            option sets the orientation of the slicing plane.  If not
+            set, an arbitrary grid-aligned north-vector is chosen.
+
+        """
+        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf,depth=depth)
+        # Hard-coding the resolution for now
+        fields = ensure_list(fields)[:]
+        width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
+        OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
+                                                       weight=weight_field,  volume=volume, no_ghost=no_ghost,
+                                                       le=le, re=re, north_vector=north_vector)
+        # Hard-coding the origin keyword since the other two options
+        # aren't well-defined for off-axis data objects
+        PWViewerMPL.__init__(self,OffAxisProj,bounds,origin='center-window',periodic=False,oblique=True)
+        self.set_axes_unit(axes_unit)
 
 _metadata_template = """
 %(pf)s<br>
@@ -1015,7 +1356,7 @@
             img_data = base64.b64encode(pngs)
             # We scale the width between 200*min_dx and 1.0
             x_width = self.xlim[1] - self.xlim[0]
-            zoom_fac = na.log10(x_width*self.pf['unitary'])/na.log10(min_zoom)
+            zoom_fac = np.log10(x_width*self.pf['unitary'])/np.log10(min_zoom)
             zoom_fac = 100.0*max(0.0, zoom_fac)
             ticks = self.get_ticks(field)
             payload = {'type':'png_string',
@@ -1059,12 +1400,12 @@
 
         raw_data = self._frb.data_source
         b = self._frb.bounds
-        xi, yi = na.mgrid[b[0]:b[1]:(vi / 8) * 1j,
+        xi, yi = np.mgrid[b[0]:b[1]:(vi / 8) * 1j,
                           b[2]:b[3]:(vj / 8) * 1j]
         x = raw_data['px']
         y = raw_data['py']
         z = raw_data[field]
-        if logit: z = na.log10(z)
+        if logit: z = np.log10(z)
         fvals = triang(x,y).nn_interpolator(z)(xi,yi).transpose()[::-1,:]
 
         ax.contour(fvals, number, colors='w')
@@ -1083,8 +1424,8 @@
         fy = "%s-velocity" % (axis_names[y_dict[axis]])
         px = new_frb[fx][::-1,:]
         py = new_frb[fy][::-1,:]
-        x = na.mgrid[0:vi-1:ny*1j]
-        y = na.mgrid[0:vj-1:nx*1j]
+        x = np.mgrid[0:vi-1:ny*1j]
+        y = np.mgrid[0:vj-1:nx*1j]
         # Always normalize, then we scale
         nn = ((px**2.0 + py**2.0)**0.5).max()
         px /= nn
@@ -1108,7 +1449,7 @@
     def _get_cbar_image(self, height = 400, width = 40, field = None):
         if field is None: field = self._current_field
         cmap_name = self._colormaps[field]
-        vals = na.mgrid[1:0:height * 1j] * na.ones(width)[:,None]
+        vals = np.mgrid[1:0:height * 1j] * np.ones(width)[:,None]
         vals = vals.transpose()
         to_plot = apply_colormap(vals, cmap_name = cmap_name)
         pngs = write_png_to_string(to_plot)
@@ -1152,30 +1493,81 @@
     figure = None
     def __init__(self, field, size):
         self._plot_valid = True
-        self.figure = matplotlib.figure.Figure(figsize = size, frameon = True)
-        # Hardcoding the axis dimensions for now
-        self.axes = self.figure.add_axes((.07,.10,.8,.8))
-        self.cax = self.figure.add_axes((.87,.10,.04,.8))
+        fsize, axrect, caxrect = self._get_best_layout(size)
+        
+        if np.any(np.array(axrect) < 0):
+            self.figure = matplotlib.figure.Figure(figsize = size, 
+                                                   frameon = True)
+            self.axes = self.figure.add_axes((.07,.10,.8,.8))
+            self.cax = self.figure.add_axes((.87,.10,.04,.8))
+            mylog.warning('The axis ratio of the requested plot is very narrow.  '
+                          'There is a good chance the plot will not look very good; '
+                          'consider making the plot manually using FixedResolutionBuffer '
+                          'and matplotlib.')
+        else:
+            self.figure = matplotlib.figure.Figure(figsize = fsize, 
+                                                   frameon = True)
+            self.axes = self.figure.add_axes(axrect)
+            self.cax = self.figure.add_axes(caxrect)
+            
+    def save(self, name, mpl_kwargs=None, canvas=None):
+        if mpl_kwargs is None:
+            mpl_kwargs = {}
+        suffix = os.path.splitext(name)[1]
+        if suffix == '':
+            suffix = '.png'
+            name = "%s%s" % (name, suffix)
+        mylog.info("Saving plot %s", name)
+        if suffix == ".png":
+            canvas = FigureCanvasAgg(self.figure)
+        elif suffix == ".pdf":
+            canvas = FigureCanvasPdf(self.figure)
+        elif suffix in (".eps", ".ps"):
+            canvas = FigureCanvasPS(self.figure)
+        else:
+            mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
+            canvas = FigureCanvasAgg(self.figure)
 
-    def save(self, name, canvas = None):
-        if name[-4:] == '.png':
-            suffix = ''
-        else:
-            suffix = '.png'
-        fn = "%s%s" % (name, suffix)
-        mylog.info("Saving plot %s", fn)
-        if canvas is None:
-            if suffix == ".png":
-                canvas = FigureCanvasAgg(self.figure)
-            elif suffix == ".pdf":
-                canvas = FigureCanvasPdf(self.figure)
-            elif suffix in (".eps", ".ps"):
-                canvas = FigureCanvasPS
-            else:
-                mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
-                canvas = FigureCanvasAgg(self.figure)
-        canvas.print_figure(fn, bbox_inches='tight')
-        return fn
+
+        canvas.print_figure(name,**mpl_kwargs)
+        return name
+
+    def _get_best_layout(self, size):
+        aspect = 1.0*size[0]/size[1]
+
+        # add room for a colorbar
+        cbar_inches = 0.7
+        newsize = [size[0] + cbar_inches, size[1]]
+        
+        # add buffers for text, and a bit of whitespace on top
+        text_buffx = 1.0/(newsize[0])
+        text_bottomy = 0.7/size[1]
+        text_topy = 0.3/size[1]
+
+        # calculate how much room the colorbar takes
+        cbar_frac = cbar_inches/newsize[0] 
+        
+        # Calculate y fraction, then use to make x fraction.
+        yfrac = 1.0-text_bottomy-text_topy
+        ysize = yfrac*size[1]
+        xsize = aspect*ysize
+        xfrac = xsize/newsize[0]
+
+        # Now make sure it all fits!
+        xbig = xfrac + text_buffx + 2.0*cbar_frac
+        ybig = yfrac + text_bottomy + text_topy
+
+        if xbig > 1:
+            xsize /= xbig
+            ysize /= xbig
+        if ybig > 1:
+            xsize /= ybig
+            ysize /= ybig
+        xfrac = xsize/newsize[0]
+        yfrac = ysize/newsize[1]
+
+        axrect = (text_buffx, text_bottomy, xfrac, yfrac )
+        caxrect = (text_buffx+xfrac, text_bottomy, cbar_frac/4., yfrac )
+        return newsize, axrect, caxrect
 
     def _repr_png_(self):
         canvas = FigureCanvasAgg(self.figure)
@@ -1198,3 +1590,5 @@
         self.image = self.axes.imshow(data, origin='lower', extent = extent,
                                       norm = norm, vmin = self.zmin, 
                                       vmax = self.zmax, cmap = cmap)
+        self.image.axes.ticklabel_format(scilimits=(-4,3))
+


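For illustration, a minimal sketch of how the new axes_unit and origin keywords
combine on a slice, assuming SlicePlot and load are exposed via yt.mods as in the
2.x API; the dataset path and field are placeholders, not part of the changeset:

>>> from yt.mods import load, SlicePlot
>>> pf = load('DD0010/DD0010')          # hypothetical dataset path
>>> p = SlicePlot(pf, 'z', 'Density', origin=('lower', 'right', 'window'),
...               axes_unit='kpc')
>>> p.save('slice_density')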
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -27,7 +27,7 @@
 import types
 
 from functools import wraps
-import numpy as na
+import numpy as np
 
 from .image_writer import \
     write_image, apply_colormap
@@ -129,19 +129,19 @@
         use_mesh = False
         xmi, xma = self.x_spec.bounds
         if self.x_spec.scale == 'log':
-            x_bins = na.logspace(na.log10(xmi), na.log10(xma),
+            x_bins = np.logspace(np.log10(xmi), np.log10(xma),
                                  self.image.shape[0]+1)
             use_mesh = True
         else:
-            x_bins = na.logspace(xmi, xma, self.image.shape[0]+1)
+            x_bins = np.logspace(xmi, xma, self.image.shape[0]+1)
 
         ymi, yma = self.y_spec.bounds
         if self.y_spec.scale == 'log':
-            y_bins = na.logspace(na.log10(ymi), na.log10(yma),
+            y_bins = np.logspace(np.log10(ymi), np.log10(yma),
                                  self.image.shape[0]+1)
             use_mesh = True
         else:
-            y_bins = na.logspace(ymi, yma, self.image.shape[0]+1)
+            y_bins = np.logspace(ymi, yma, self.image.shape[0]+1)
 
         im = self.image
         if self.cbar.scale == 'log':
@@ -298,7 +298,7 @@
             nz = (self.profile[self._current_field] > 0)
             mi = self.profile[self._current_field][nz].min()
         else:
-            mi = self.profile[self._current_field][nz].min()
+            mi = self.profile[self._current_field].min()
         ma = self.profile[self._current_field].max()
         cbar.bounds = (mi, ma)
         cbar.cmap = 'algae'
@@ -330,11 +330,11 @@
         raw_data = self.plot.image[::-1,:]
 
         if self.plot.cbar.scale == 'log':
-            func = na.log10
+            func = np.log10
         else:
             func = lambda a: a
-        raw_data = na.repeat(raw_data, 3, axis=0)
-        raw_data = na.repeat(raw_data, 3, axis=1)
+        raw_data = np.repeat(raw_data, 3, axis=0)
+        raw_data = np.repeat(raw_data, 3, axis=1)
         to_plot = apply_colormap(raw_data, self.plot.cbar.bounds,
                                  self.plot.cbar.cmap, func)
         if self.plot.cbar.scale == 'log':
@@ -361,7 +361,7 @@
 
     def _convert_axis(self, spec):
         func = lambda a: a
-        if spec.scale == 'log': func = na.log10
+        if spec.scale == 'log': func = np.log10
         tick_info = self._convert_ticks(spec.ticks, spec.bounds, func)
         ax = {'ticks':tick_info,
               'title': spec.title}
@@ -370,7 +370,7 @@
     def _get_cbar_image(self, height = 400, width = 40):
         # Right now there's just the single 'cmap', but that will eventually
         # change.  I think?
-        vals = na.mgrid[1:0:height * 1j] * na.ones(width)[:,None]
+        vals = np.mgrid[1:0:height * 1j] * np.ones(width)[:,None]
         vals = vals.transpose()
         to_plot = apply_colormap(vals)
         pngs = write_png_to_string(to_plot)


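The bin-edge logic above follows the usual numpy idiom: log-scaled axes take
their edges from np.logspace over the log10 of the bounds, while linear axes use
np.linspace directly. A standalone sketch, with arbitrary bounds and bin count:

>>> import numpy as np
>>> xmi, xma, nbins = 1.0e-3, 1.0e3, 64
>>> log_edges = np.logspace(np.log10(xmi), np.log10(xma), nbins + 1)
>>> lin_edges = np.linspace(xmi, xma, nbins + 1)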
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.data_objects.construction_data_containers import YTStreamlineBase
 from yt.funcs import *
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -61,7 +61,7 @@
         Default: minimum dx
     length : float, optional
         Optionally specify the length of integration.  
-        Default: na.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
+        Default: np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
     direction : real, optional
         Specifies the direction of integration.  The magnitude of this
         value has no effect, only the sign.
@@ -77,10 +77,10 @@
     >>> from yt.visualization.api import Streamlines
     >>> pf = load('DD1701') # Load pf
 
-    >>> c = na.array([0.5]*3)
+    >>> c = np.array([0.5]*3)
     >>> N = 100
     >>> scale = 1.0
-    >>> pos_dx = na.random.random((N,3))*scale-scale/2.
+    >>> pos_dx = np.random.random((N,3))*scale-scale/2.
     >>> pos = c+pos_dx
     
     >>> streamlines = Streamlines(pf,pos,'x-velocity', 'y-velocity', 'z-velocity', length=1.0) 
@@ -91,7 +91,7 @@
     >>> fig=pl.figure() 
     >>> ax = Axes3D(fig)
     >>> for stream in streamlines.streamlines:
-    >>>     stream = stream[na.all(stream != 0.0, axis=1)]
+    >>>     stream = stream[np.all(stream != 0.0, axis=1)]
     >>>     ax.plot3D(stream[:,0], stream[:,1], stream[:,2], alpha=0.1)
     >>> pl.savefig('streamlines.png')
     """
@@ -101,13 +101,13 @@
                  get_magnitude=False):
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
-        self.start_positions = na.array(positions)
+        self.start_positions = np.array(positions)
         self.N = self.start_positions.shape[0]
         self.xfield = xfield
         self.yfield = yfield
         self.zfield = zfield
         self.get_magnitude=get_magnitude
-        self.direction = na.sign(direction)
+        self.direction = np.sign(direction)
         if volume is None:
             volume = AMRKDTree(self.pf, fields=[self.xfield,self.yfield,self.zfield],
                             log_fields=[False,False,False], merge_trees=True)
@@ -116,13 +116,13 @@
             dx = self.pf.h.get_smallest_dx()
         self.dx = dx
         if length is None:
-            length = na.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
+            length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
         self.steps = int(length/dx)
-        self.streamlines = na.zeros((self.N,self.steps,3), dtype='float64')
+        self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:
-            self.magnitudes = na.zeros((self.N,self.steps), dtype='float64')
+            self.magnitudes = np.zeros((self.N,self.steps), dtype='float64')
         
     def integrate_through_volume(self):
         nprocs = self.comm.size
@@ -161,21 +161,21 @@
                 brick.integrate_streamline(stream[-step+1], self.direction*self.dx, marr)
                 mag[-step+1] = marr[0]
                 
-            if na.any(stream[-step+1,:] <= self.pf.domain_left_edge) | \
-                   na.any(stream[-step+1,:] >= self.pf.domain_right_edge):
+            if np.any(stream[-step+1,:] <= self.pf.domain_left_edge) | \
+                   np.any(stream[-step+1,:] >= self.pf.domain_right_edge):
                 return 0
 
-            if na.any(stream[-step+1,:] < node.l_corner) | \
-                   na.any(stream[-step+1,:] >= node.r_corner):
+            if np.any(stream[-step+1,:] < node.l_corner) | \
+                   np.any(stream[-step+1,:] >= node.r_corner):
                 return step-1
             step -= 1
         return step
 
     def clean_streamlines(self):
-        temp = na.empty(self.N, dtype='object')
-        temp2 = na.empty(self.N, dtype='object')
+        temp = np.empty(self.N, dtype='object')
+        temp2 = np.empty(self.N, dtype='object')
         for i,stream in enumerate(self.streamlines):
-            mask = na.all(stream != 0.0, axis=1)
+            mask = np.all(stream != 0.0, axis=1)
             temp[i] = stream[mask]
             temp2[i] = self.magnitudes[i,mask]
         self.streamlines = temp


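clean_streamlines above keeps only the rows of each streamline that were
actually filled in; integration steps that never ran remain all zeros. The
masking idiom in isolation, on toy data with no yt objects required:

>>> import numpy as np
>>> stream = np.array([[0.1, 0.2, 0.3],
...                    [0.4, 0.5, 0.6],
...                    [0.0, 0.0, 0.0]])   # unfilled integration step
>>> mask = np.all(stream != 0.0, axis=1)
>>> clean = stream[mask]                   # drops the all-zero row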
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/tick_locators.py
--- a/yt/visualization/tick_locators.py
+++ b/yt/visualization/tick_locators.py
@@ -5,7 +5,7 @@
 ##
 
 import math
-import numpy as na
+import numpy as np
 
 def is_decade(x,base=10):
     if x == 0.0:
@@ -40,7 +40,7 @@
         if subs is None:
             self._subs = None  # autosub
         else:
-            self._subs = na.asarray(subs)+0.0
+            self._subs = np.asarray(subs)+0.0
 
     def _set_numticks(self):
         self.numticks = 15  # todo; be smart here; this is just for dev
@@ -62,9 +62,9 @@
         numdec = math.floor(vmax)-math.ceil(vmin)
 
         if self._subs is None: # autosub
-            if numdec>10: subs = na.array([1.0])
-            elif numdec>6: subs = na.arange(2.0, b, 2.0)
-            else: subs = na.arange(2.0, b)
+            if numdec>10: subs = np.array([1.0])
+            elif numdec>6: subs = np.arange(2.0, b, 2.0)
+            else: subs = np.arange(2.0, b)
         else:
             subs = self._subs
 
@@ -72,7 +72,7 @@
         while numdec/stride+1 > self.numticks:
             stride += 1
 
-        decades = na.arange(math.floor(vmin),
+        decades = np.arange(math.floor(vmin),
                              math.ceil(vmax)+stride, stride)
         if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
             ticklocs = []
@@ -81,7 +81,7 @@
         else:
             ticklocs = b**decades
 
-        return na.array(ticklocs)
+        return np.array(ticklocs)
 
 
 class LinearLocator(object):
@@ -122,7 +122,7 @@
 
 
         if self.numticks==0: return []
-        ticklocs = na.linspace(vmin, vmax, self.numticks)
+        ticklocs = np.linspace(vmin, vmax, self.numticks)
 
         #return self.raise_if_exceeds(ticklocs)
         return ticklocs


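A quick sketch of exercising the ported LogLocator on its own, assuming the
matplotlib-style call signature (base and subs set at construction, instance
called with data-space bounds) carries over unchanged:

>>> from yt.visualization.tick_locators import LogLocator
>>> locator = LogLocator()            # base 10, automatic subs
>>> ticks = locator(1.0e-2, 1.0e5)    # decade tick locations in [1e-2, 1e5]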
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/volume_rendering/CUDARayCast.py
--- a/yt/visualization/volume_rendering/CUDARayCast.py
+++ b/yt/visualization/volume_rendering/CUDARayCast.py
@@ -27,7 +27,7 @@
 
 from yt.mods import *
 import yt.extensions.HierarchySubset as hs
-import numpy as na
+import numpy as np
 import h5py, time
 
 import matplotlib;matplotlib.use("Agg");import pylab
@@ -62,7 +62,7 @@
 
     print "Constructing transfer function."
     if "Data" in fn:
-        mh = na.log10(1.67e-24)
+        mh = np.log10(1.67e-24)
         tf = ColorTransferFunction((7.5+mh, 14.0+mh))
         tf.add_gaussian( 8.25+mh, 0.002, [0.2, 0.2, 0.4, 0.1])
         tf.add_gaussian( 9.75+mh, 0.002, [0.0, 0.0, 0.3, 0.1])
@@ -77,17 +77,17 @@
         tf.add_gaussian(-28.5, 0.05, [1.0, 1.0, 1.0, 1.0])
     else: raise RuntimeError
 
-    cpu['ngrids'] = na.array([cpu['dims'].shape[0]], dtype='int32')
+    cpu['ngrids'] = np.array([cpu['dims'].shape[0]], dtype='int32')
     cpu['tf_r'] = tf.red.y.astype("float32")
     cpu['tf_g'] = tf.green.y.astype("float32")
     cpu['tf_b'] = tf.blue.y.astype("float32")
     cpu['tf_a'] = tf.alpha.y.astype("float32")
 
-    cpu['tf_bounds'] = na.array(tf.x_bounds, dtype='float32')
+    cpu['tf_bounds'] = np.array(tf.x_bounds, dtype='float32')
 
-    cpu['v_dir'] = na.array([0.3, 0.5, 0.6], dtype='float32')
+    cpu['v_dir'] = np.array([0.3, 0.5, 0.6], dtype='float32')
 
-    c = na.array([0.47284317, 0.48062515, 0.58282089], dtype='float32')
+    c = np.array([0.47284317, 0.48062515, 0.58282089], dtype='float32')
 
     print "Getting cutting plane."
     cp = pf.h.cutting(cpu['v_dir'], c)
@@ -98,16 +98,16 @@
     back_c = c - cp._norm_vec * W
     front_c = c + cp._norm_vec * W
 
-    px, py = na.mgrid[-W:W:Nvec*1j,-W:W:Nvec*1j]
+    px, py = np.mgrid[-W:W:Nvec*1j,-W:W:Nvec*1j]
     xv = cp._inv_mat[0,0]*px + cp._inv_mat[0,1]*py + cp.center[0]
     yv = cp._inv_mat[1,0]*px + cp._inv_mat[1,1]*py + cp.center[1]
     zv = cp._inv_mat[2,0]*px + cp._inv_mat[2,1]*py + cp.center[2]
-    cpu['v_pos'] = na.array([xv, yv, zv], dtype='float32').transpose()
+    cpu['v_pos'] = np.array([xv, yv, zv], dtype='float32').transpose()
 
-    cpu['image_r'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_g'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_b'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_a'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_r'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_g'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_b'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_a'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
 
     print "Generating module"
     source = open("yt/extensions/volume_rendering/_cuda_caster.cu").read()
@@ -161,7 +161,7 @@
         pylab.imshow(image[-1], interpolation='nearest')
         pylab.savefig("/u/ki/mturk/public_html/vr6/%s.png" % (ii))
 
-    image = na.array(image).transpose()
+    image = np.array(image).transpose()
     image = (image - mi) / (ma - mi)
     pylab.clf()
     pylab.imshow(image, interpolation='nearest')


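The cutting-plane setup above builds one ray origin per pixel by pushing an
image-plane grid through the plane's inverse rotation. The same pattern in
isolation, where inv_mat and center are stand-ins for cp._inv_mat and cp.center:

>>> import numpy as np
>>> Nvec, W = 4, 0.25
>>> inv_mat = np.eye(3)                    # stand-in for cp._inv_mat
>>> center = np.array([0.5, 0.5, 0.5])     # stand-in for cp.center
>>> px, py = np.mgrid[-W:W:Nvec*1j, -W:W:Nvec*1j]
>>> xv = inv_mat[0,0]*px + inv_mat[0,1]*py + center[0]
>>> yv = inv_mat[1,0]*px + inv_mat[1,1]*py + center[1]
>>> zv = inv_mat[2,0]*px + inv_mat[2,1]*py + center[2]
>>> v_pos = np.array([xv, yv, zv], dtype='float32').transpose()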
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/volume_rendering/UBVRI.py
--- a/yt/visualization/volume_rendering/UBVRI.py
+++ b/yt/visualization/volume_rendering/UBVRI.py
@@ -24,21 +24,21 @@
 """
 
 
-import numpy as na
+import numpy as np
 
 johnson_filters = dict(
     B = dict(
-      wavelen = na.array([3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
+      wavelen = np.array([3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
         4050, 4100, 4150, 4200, 4250, 4300, 4350, 4400, 4450, 4500, 4550, 4600,
         4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000, 5050, 5100, 5150, 5200,
         5250, 5300, 5350, 5400, 5450, 5500, 5550], dtype='float64'),
-      trans = na.array([0.0, 0.0, 0.02, 0.05, 0.11, 0.18, 0.35, 0.55, 0.92,
+      trans = np.array([0.0, 0.0, 0.02, 0.05, 0.11, 0.18, 0.35, 0.55, 0.92,
         0.95, 0.98, 0.99, 1.0, 0.99, 0.98, 0.96, 0.94, 0.91, 0.87, 0.83, 0.79,
         0.74, 0.69, 0.63, 0.58, 0.52, 0.46, 0.41, 0.36, 0.3, 0.25, 0.2, 0.15,
         0.12, 0.09, 0.06, 0.04, 0.02, 0.01, 0.0, ], dtype='float64'),
       ),
     I = dict(
-      wavelen = na.array([ 6800, 6850, 6900, 6950, 7000, 7050, 7100,
+      wavelen = np.array([ 6800, 6850, 6900, 6950, 7000, 7050, 7100,
         7150, 7200, 7250, 7300, 7350, 7400, 7450, 7500, 7550, 7600, 7650, 7700,
         7750, 7800, 7850, 7900, 7950, 8000, 8050, 8100, 8150, 8200, 8250, 8300,
         8350, 8400, 8450, 8500, 8550, 8600, 8650, 8700, 8750, 8800, 8850, 8900,
@@ -48,7 +48,7 @@
         10600, 10650, 10700, 10750, 10800, 10850, 10900, 10950, 11000, 11050,
         11100, 11150, 11200, 11250, 11300, 11350, 11400, 11450, 11500, 11550,
         11600, 11650, 11700, 11750, 11800, 11850, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.0, 0.01, 0.01, 0.01, 0.04, 0.08, 0.13, 0.17,
+      trans = np.array([ 0.0, 0.0, 0.01, 0.01, 0.01, 0.04, 0.08, 0.13, 0.17,
         0.21, 0.26, 0.3, 0.36, 0.4, 0.44, 0.49, 0.56, 0.6, 0.65, 0.72, 0.76,
         0.84, 0.9, 0.93, 0.96, 0.97, 0.97, 0.98, 0.98, 0.99, 0.99, 0.99, 0.99,
         1.0, 1.0, 1.0, 1.0, 1.0, 0.99, 0.98, 0.98, 0.97, 0.96, 0.94, 0.93, 0.9,
@@ -59,7 +59,7 @@
         0.02, 0.02, 0.02, 0.02, 0.01, 0.01, 0.01, 0.0, ], dtype='float64'),
       ),
     R = dict(
-      wavelen = na.array([ 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
+      wavelen = np.array([ 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
         5650, 5700, 5750, 5800, 5850, 5900, 5950, 6000, 6050, 6100, 6150, 6200,
         6250, 6300, 6350, 6400, 6450, 6500, 6550, 6600, 6650, 6700, 6750, 6800,
         6850, 6900, 6950, 7000, 7050, 7100, 7150, 7200, 7250, 7300, 7350, 7400,
@@ -67,7 +67,7 @@
         8050, 8100, 8150, 8200, 8250, 8300, 8350, 8400, 8450, 8500, 8550, 8600,
         8650, 8700, 8750, 8800, 8850, 8900, 8950, 9000, 9050, 9100, 9150, 9200,
         9250, 9300, 9350, 9400, 9450, 9500, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.01, 0.02, 0.04, 0.06, 0.11, 0.18, 0.23, 0.28,
+      trans = np.array([ 0.0, 0.01, 0.02, 0.04, 0.06, 0.11, 0.18, 0.23, 0.28,
         0.34, 0.4, 0.46, 0.5, 0.55, 0.6, 0.64, 0.69, 0.71, 0.74, 0.77, 0.79,
         0.81, 0.84, 0.86, 0.88, 0.9, 0.91, 0.92, 0.94, 0.95, 0.96, 0.97, 0.98,
         0.99, 0.99, 1.0, 1.0, 0.99, 0.98, 0.96, 0.94, 0.92, 0.9, 0.88, 0.85,
@@ -77,20 +77,20 @@
         0.02, 0.01, 0.01, 0.01, 0.01, 0.0, ], dtype='float64'),
       ),
     U = dict(
-      wavelen = na.array([ 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350, 3400,
+      wavelen = np.array([ 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350, 3400,
         3450, 3500, 3550, 3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
         4050, 4100, 4150, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.04, 0.1, 0.25, 0.61, 0.75, 0.84, 0.88, 0.93,
+      trans = np.array([ 0.0, 0.04, 0.1, 0.25, 0.61, 0.75, 0.84, 0.88, 0.93,
         0.95, 0.97, 0.99, 1.0, 0.99, 0.97, 0.92, 0.73, 0.56, 0.36, 0.23, 0.05,
         0.03, 0.01, 0.0, ], dtype='float64'),),
     V = dict(
-      wavelen = na.array([ 4600, 4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000,
+      wavelen = np.array([ 4600, 4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000,
         5050, 5100, 5150, 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
         5650, 5700, 5750, 5800, 5850, 5900, 5950, 6000, 6050, 6100, 6150, 6200,
         6250, 6300, 6350, 6400, 6450, 6500, 6550, 6600, 6650, 6700, 6750, 6800,
         6850, 6900, 6950, 7000, 7050, 7100, 7150, 7200, 7250, 7300, 7350, ],
           dtype='float64'),
-      trans = na.array([ 0.0, 0.0, 0.01, 0.01, 0.02, 0.05, 0.11, 0.2, 0.38,
+      trans = np.array([ 0.0, 0.0, 0.01, 0.01, 0.02, 0.05, 0.11, 0.2, 0.38,
         0.67, 0.78, 0.85, 0.91, 0.94, 0.96, 0.98, 0.98, 0.95, 0.87, 0.79, 0.72,
         0.71, 0.69, 0.65, 0.62, 0.58, 0.52, 0.46, 0.4, 0.34, 0.29, 0.24, 0.2,
         0.17, 0.14, 0.11, 0.08, 0.06, 0.05, 0.03, 0.02, 0.02, 0.01, 0.01, 0.01,
@@ -102,4 +102,4 @@
 for filter, vals in johnson_filters.items():
     wavelen = vals["wavelen"]
     trans = vals["trans"]
-    vals["Lchar"] = wavelen[na.argmax(trans)]
+    vals["Lchar"] = wavelen[np.argmax(trans)]


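The closing loop in UBVRI.py tags each Johnson filter with its characteristic
wavelength, i.e. the wavelength of peak transmission; the same one-liner works
on any pair of sampled curves:

>>> import numpy as np
>>> wavelen = np.array([4600., 5100., 5600., 6100.])
>>> trans = np.array([0.0, 0.91, 0.72, 0.34])
>>> Lchar = wavelen[np.argmax(trans)]    # 5100.0, the peak-transmission point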
diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -24,7 +24,7 @@
 """
 
 import __builtin__
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.math_utils import *
@@ -37,6 +37,7 @@
     arr_ang2pix_nest, arr_fisheye_vectors
 from yt.utilities.math_utils import get_rotation_matrix
 from yt.utilities.orientation import Orientation
+from yt.data_objects.api import ImageArray
 from yt.visualization.image_writer import write_bitmap, write_image
 from yt.data_objects.data_containers import data_object_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -167,12 +168,12 @@
         >>> pf = EnzoStaticOutput('DD1701') # Load pf
         >>> c = [0.5]*3 # Center
         >>> L = [1.0,1.0,1.0] # Viewpoint
-        >>> W = na.sqrt(3) # Width
+        >>> W = np.sqrt(3) # Width
         >>> N = 1024 # Pixels (1024^2)
 
         # Get density min, max
         >>> mi, ma = pf.h.all_data().quantities['Extrema']('Density')[0]
-        >>> mi, ma = na.log10(mi), na.log10(ma)
+        >>> mi, ma = np.log10(mi), np.log10(ma)
 
         # Construct transfer function
         >>> tf = vr.ColorTransferFunction((mi-2, ma+2))
@@ -195,7 +196,7 @@
         if not iterable(width):
             width = (width, width, width) # left/right, top/bottom, front/back 
         self.orienter = Orientation(normal_vector, north_vector=north_vector, steady_north=steady_north)
-        self.rotation_vector = self.orienter.north_vector
+        self.rotation_vector = self.orienter.unit_vectors[1]
         self._setup_box_properties(width, center, self.orienter.unit_vectors)
         if fields is None: fields = ["Density"]
         self.fields = fields
@@ -226,10 +227,10 @@
     def _setup_box_properties(self, width, center, unit_vectors):
         self.width = width
         self.center = center
-        self.box_vectors = na.array([unit_vectors[0]*width[0],
+        self.box_vectors = np.array([unit_vectors[0]*width[0],
                                      unit_vectors[1]*width[1],
                                      unit_vectors[2]*width[2]])
-        self.origin = center - 0.5*na.dot(width,unit_vectors)
+        self.origin = center - 0.5*np.dot(width,unit_vectors)
         self.back_center =  center - 0.5*width[2]*unit_vectors[2]
         self.front_center = center + 0.5*width[2]*unit_vectors[2]         
 
@@ -282,39 +283,43 @@
         if center is not None:
             self.center = center
         if north_vector is None:
-            north_vector = self.orienter.north_vector
+            north_vector = self.orienter.unit_vectors[1]
         if normal_vector is None:
             normal_vector = self.orienter.normal_vector
         self.orienter.switch_orientation(normal_vector = normal_vector,
                                          north_vector = north_vector)
         self._setup_box_properties(width, self.center, self.orienter.unit_vectors)
     def new_image(self):
-        image = na.zeros((self.resolution[0], self.resolution[1], 3), dtype='float64', order='C')
+        image = np.zeros((self.resolution[0], self.resolution[1], 3), dtype='float64', order='C')
         return image
 
     def get_sampler_args(self, image):
-        rotp = na.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
+        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
         args = (rotp, self.box_vectors[2], self.back_center,
                 (-self.width[0]/2.0, self.width[0]/2.0,
                  -self.width[1]/2.0, self.width[1]/2.0),
                 image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
-                na.array(self.width), self.transfer_function, self.sub_samples)
+                np.array(self.width), self.transfer_function, self.sub_samples)
         return args
 
+    star_trees = None
     def get_sampler(self, args):
+        kwargs = {}
+        if self.star_trees is not None:
+            kwargs = {'star_list': self.star_trees}
         if self.use_light:
             if self.light_dir is None:
                 self.set_default_light_dir()
-            temp_dir = na.empty(3,dtype='float64')
+            temp_dir = np.empty(3,dtype='float64')
             temp_dir = self.light_dir[0] * self.orienter.unit_vectors[1] + \
                     self.light_dir[1] * self.orienter.unit_vectors[2] + \
                     self.light_dir[2] * self.orienter.unit_vectors[0]
             if self.light_rgba is None:
                 self.set_default_light_rgba()
             sampler = LightSourceRenderSampler(*args, light_dir=temp_dir,
-                    light_rgba=self.light_rgba)
+                    light_rgba=self.light_rgba, **kwargs)
         else:
-            sampler = self._sampler_object(*args)
+            sampler = self._sampler_object(*args, **kwargs)
         return sampler
 
     def finalize_image(self, image):
@@ -326,13 +331,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
 
         view_pos = self.front_center + self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
         for brick in self.volume.traverse(view_pos, self.front_center, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
 
         pbar.finish()
@@ -342,15 +347,21 @@
 
     def save_image(self, fn, clip_ratio, image):
         if self.comm.rank == 0 and fn is not None:
-            if clip_ratio is not None:
-                write_bitmap(image, fn, clip_ratio * image.std())
-            else:
-                write_bitmap(image, fn)
-
+            image.write_png(fn, clip_ratio=clip_ratio)
 
     def initialize_source(self):
         return self.volume.initialize_source()
 
+    def get_information(self):
+        info_dict = {'fields':self.fields,
+                     'type':self.__class__.__name__,
+                     'east_vector':self.orienter.unit_vectors[0],
+                     'north_vector':self.orienter.unit_vectors[1],
+                     'normal_vector':self.orienter.unit_vectors[2],
+                     'width':self.width,
+                     'dataset':self.pf.fullpath}
+        return info_dict
+
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
                  num_threads = 0):
         r"""Ray-cast the camera.
@@ -385,7 +396,9 @@
         args = self.get_sampler_args(image)
         sampler = self.get_sampler(args)
         self.initialize_source()
-        image = self._render(double_check, num_threads, image, sampler)
+        image = ImageArray(self._render(double_check, num_threads, 
+                                        image, sampler),
+                           info=self.get_information())
         self.save_image(fn, clip_ratio, image)
         return image
 
@@ -510,30 +523,30 @@
         >>> for i, snapshot in enumerate(cam.move_to([0.2,0.3,0.6], 10)):
         ...     iw.write_bitmap(snapshot, "move_%04i.png" % i)
         """
-        self.center = na.array(self.center)
+        self.center = np.array(self.center)
         dW = None
         if exponential:
             if final_width is not None:
                 if not iterable(final_width):
-                    width = na.array([final_width, final_width, final_width]) 
+                    width = np.array([final_width, final_width, final_width]) 
                     # left/right, top/bottom, front/back 
                 if (self.center == 0.0).all():
-                    self.center += (na.array(final) - self.center) / (10. * n_steps)
-                final_zoom = final_width/na.array(self.width)
+                    self.center += (np.array(final) - self.center) / (10. * n_steps)
+                final_zoom = final_width/np.array(self.width)
                 dW = final_zoom**(1.0/n_steps)
             else:
-                dW = na.array([1.0,1.0,1.0])
-            position_diff = (na.array(final)/self.center)*1.0
+                dW = np.array([1.0,1.0,1.0])
+            position_diff = (np.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
             if final_width is not None:
                 if not iterable(final_width):
-                    width = na.array([final_width, final_width, final_width]) 
+                    width = np.array([final_width, final_width, final_width]) 
                     # left/right, top/bottom, front/back
-                dW = (1.0*final_width-na.array(self.width))/n_steps
+                dW = (1.0*final_width-np.array(self.width))/n_steps
             else:
-                dW = na.array([0.0,0.0,0.0])
-            dx = (na.array(final)-self.center)*1.0/n_steps
+                dW = np.array([0.0,0.0,0.0])
+            dx = (np.array(final)-self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
                 self.switch_view(center=self.center*dx, width=self.width*dW)
@@ -559,7 +572,7 @@
         Examples
         --------
 
-        >>> cam.rotate(na.pi/4)
+        >>> cam.rotate(np.pi/4)
         """
         if rot_vector is None:
             rot_vector = self.rotation_vector
@@ -568,7 +581,7 @@
 
         normal_vector = self.front_center-self.center
 
-        self.switch_view(normal_vector=na.dot(R,normal_vector))
+        self.switch_view(normal_vector=np.dot(R,normal_vector))
 
     def roll(self, theta):
         r"""Roll by a given angle
@@ -583,12 +596,12 @@
         Examples
         --------
 
-        >>> cam.roll(na.pi/4)
+        >>> cam.roll(np.pi/4)
         """
         rot_vector = self.orienter.normal_vector
         R = get_rotation_matrix(theta, rot_vector)
-        north_vector = self.orienter.north_vector
-        self.switch_view(north_vector=na.dot(R, north_vector))
+        north_vector = self.orienter.unit_vectors[1]
+        self.switch_view(north_vector=np.dot(R, north_vector))
 
     def rotation(self, theta, n_steps, rot_vector=None, clip_ratio = None):
         r"""Loop over rotate, creating a rotation
@@ -613,7 +626,7 @@
         Examples
         --------
 
-        >>> for i, snapshot in enumerate(cam.rotation(na.pi, 10)):
+        >>> for i, snapshot in enumerate(cam.rotation(np.pi, 10)):
         ...     iw.write_bitmap(snapshot, 'rotation_%04i.png' % i)
         """
 
@@ -665,7 +678,7 @@
 class PerspectiveCamera(Camera):
     expand_factor = 1.0
     def __init__(self, *args, **kwargs):
-        expand_factor = kwargs.pop('expand_factor', 1.0)
+        self.expand_factor = kwargs.pop('expand_factor', 1.0)
         Camera.__init__(self, *args, **kwargs)
 
     def get_sampler_args(self, image):
@@ -676,12 +689,12 @@
         self.front_center += self.expand_factor*dl
         self.back_center -= dl
 
-        px = na.linspace(-self.width[0]/2.0, self.width[0]/2.0,
+        px = np.linspace(-self.width[0]/2.0, self.width[0]/2.0,
                          self.resolution[0])[:,None]
-        py = na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
+        py = np.linspace(-self.width[1]/2.0, self.width[1]/2.0,
                          self.resolution[1])[None,:]
         inv_mat = self.orienter.inv_mat
-        positions = na.zeros((self.resolution[0], self.resolution[1], 3),
+        positions = np.zeros((self.resolution[0], self.resolution[1], 3),
                           dtype='float64', order='C')
         positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
         positions[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
@@ -693,22 +706,43 @@
         positions = self.front_center - 1.0*(((self.back_center-self.front_center)**2).sum())**0.5*vectors
         vectors = (self.front_center - positions)
 
-        uv = na.ones(3, dtype='float64')
+        uv = np.ones(3, dtype='float64')
         image.shape = (self.resolution[0]**2,1,3)
         vectors.shape = (self.resolution[0]**2,1,3)
         positions.shape = (self.resolution[0]**2,1,3)
         args = (positions, vectors, self.back_center, 
                 (0.0,1.0,0.0,1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'), 
+                np.zeros(3, dtype='float64'), 
                 self.transfer_function, self.sub_samples)
         return args
 
+    def _render(self, double_check, num_threads, image, sampler):
+        pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+        total_cells = 0
+        if double_check:
+            for brick in self.volume.bricks:
+                for data in brick.my_data:
+                    if np.any(np.isnan(data)):
+                        raise RuntimeError
+
+        view_pos = self.front_center
+        for brick in self.volume.traverse(view_pos, self.front_center, image):
+            sampler(brick, num_threads=num_threads)
+            total_cells += np.prod(brick.my_data[0].shape)
+            pbar.update(total_cells)
+
+        pbar.finish()
+        image = sampler.aimage
+        self.finalize_image(image)
+        return image
+
+
     def finalize_image(self, image):
         image.shape = self.resolution[0], self.resolution[0], 3
 
 def corners(left_edge, right_edge):
-    return na.array([
+    return np.array([
       [left_edge[:,0], left_edge[:,1], left_edge[:,2]],
       [right_edge[:,0], left_edge[:,1], left_edge[:,2]],
       [right_edge[:,0], right_edge[:,1], left_edge[:,2]],
@@ -720,19 +754,28 @@
     ], dtype='float64')
 
 class HEALpixCamera(Camera):
+
+    _sampler_object = None 
+    
     def __init__(self, center, radius, nside,
                  transfer_function = None, fields = None,
                  sub_samples = 5, log_fields = None, volume = None,
                  pf = None, use_kd=True, no_ghost=False, use_light=False):
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.nside = nside
         self.use_kd = use_kd
         if transfer_function is None:
             transfer_function = ProjectionTransferFunction()
         self.transfer_function = transfer_function
+
+        if isinstance(self.transfer_function, ProjectionTransferFunction):
+            self._sampler_object = ProjectionSampler
+        else:
+            self._sampler_object = VolumeRenderSampler
+
         if fields is None: fields = ["Density"]
         self.fields = fields
         self.sub_samples = sub_samples
@@ -747,20 +790,20 @@
         self.volume = volume
 
     def new_image(self):
-        image = na.zeros((12 * self.nside ** 2, 1, 3), dtype='float64', order='C')
+        image = np.zeros((12 * self.nside ** 2, 1, 3), dtype='float64', order='C')
         return image
 
     def get_sampler_args(self, image):
         nv = 12 * self.nside ** 2
-        vs = arr_pix2vec_nest(self.nside, na.arange(nv))
+        vs = arr_pix2vec_nest(self.nside, np.arange(nv))
         vs *= self.radius
         vs.shape = nv, 1, 3
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((nv, 1, 3), dtype='float64') * self.center
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((nv, 1, 3), dtype='float64') * self.center
         args = (positions, vs, self.center,
                 (0.0, 1.0, 0.0, 1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
         return args
  
@@ -771,13 +814,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
         
         view_pos = self.center
         for brick in self.volume.traverse(view_pos, None, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         
         pbar.finish()
@@ -787,6 +830,15 @@
 
         return image
 
+    def get_information(self):
+        info_dict = {'fields':self.fields,
+                     'type':self.__class__.__name__,
+                     'center':self.center,
+                     'radius':self.radius,
+                     'dataset':self.pf.fullpath}
+        return info_dict
+
+
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
                  num_threads = 0, clim = None, label = None):
         r"""Ray-cast the camera.
@@ -814,7 +866,9 @@
         args = self.get_sampler_args(image)
         sampler = self.get_sampler(args)
         self.volume.initialize_source()
-        image = self._render(double_check, num_threads, image, sampler)
+        image = ImageArray(self._render(double_check, num_threads, 
+                                        image, sampler),
+                           info=self.get_information())
         self.save_image(fn, clim, image, label = label)
         return image
 
@@ -823,14 +877,14 @@
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
             import matplotlib.backends.backend_agg
-            phi, theta = na.mgrid[0.0:2*na.pi:800j, 0:na.pi:800j]
+            phi, theta = np.mgrid[0.0:2*np.pi:800j, 0:np.pi:800j]
             pixi = arr_ang2pix_nest(self.nside, theta.ravel(), phi.ravel())
             image *= self.radius * self.pf['cm']
-            img = na.log10(image[:,0,0][pixi]).reshape((800,800))
+            img = np.log10(image[:,0,0][pixi]).reshape((800,800))
 
             fig = matplotlib.figure.Figure((10, 5))
             ax = fig.add_subplot(1,1,1,projection='hammer')
-            implot = ax.imshow(img, extent=(-na.pi,na.pi,-na.pi/2,na.pi/2), clip_on=False, aspect=0.5)
+            implot = ax.imshow(img, extent=(-np.pi,np.pi,-np.pi/2,np.pi/2), clip_on=False, aspect=0.5)
             cb = fig.colorbar(implot, orientation='horizontal')
 
             if label == None:
@@ -852,7 +906,7 @@
                  rays_per_cell = 0.1, max_nside = 8192):
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.use_kd = use_kd
         if transfer_function is None:
@@ -880,8 +934,8 @@
                         (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
         total_cells = 0
         bricks = [b for b in self.volume.traverse(None, self.center, None)][::-1]
-        left_edges = na.array([b.LeftEdge for b in bricks])
-        right_edges = na.array([b.RightEdge for b in bricks])
+        left_edges = np.array([b.LeftEdge for b in bricks])
+        right_edges = np.array([b.RightEdge for b in bricks])
         min_dx = min(((b.RightEdge[0] - b.LeftEdge[0])/b.my_data[0].shape[0]
                      for b in bricks))
         # We jitter a bit if we're on a boundary of our initial grid
@@ -896,7 +950,7 @@
         for i,brick in enumerate(bricks):
             ray_source.integrate_brick(brick, tfp, i, left_edges, right_edges,
                                        bricks)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         pbar.finish()
         info, values = ray_source.get_rays()
@@ -935,10 +989,10 @@
         self.use_light = use_light
         self.light_dir = None
         self.light_rgba = None
-        if rotation is None: rotation = na.eye(3)
+        if rotation is None: rotation = np.eye(3)
         self.rotation_matrix = rotation
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.fov = fov
         if iterable(resolution):
@@ -957,7 +1011,7 @@
         self.volume = volume
 
     def new_image(self):
-        image = na.zeros((self.resolution**2,1,3), dtype='float64', order='C')
+        image = np.zeros((self.resolution**2,1,3), dtype='float64', order='C')
         return image
         
     def get_sampler_args(self, image):
@@ -968,13 +1022,13 @@
             vp[:,:,i] = (vp2 * self.rotation_matrix[:,i]).sum(axis=2)
         del vp2
         vp *= self.radius
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
 
         args = (positions, vp, self.center,
                 (0.0, 1.0, 0.0, 1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
         return args
 
@@ -988,13 +1042,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
         
         view_pos = self.center
         for brick in self.volume.traverse(view_pos, None, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         
         pbar.finish()
@@ -1088,7 +1142,7 @@
         
         >>> field='Density'
         >>> mi,ma = pf.h.all_data().quantities['Extrema']('Density')[0]
-        >>> mi,ma = na.log10(mi), na.log10(ma)
+        >>> mi,ma = np.log10(mi), np.log10(ma)
         
         # You may want to comment out the above lines and manually set the min and max
         # of the log of the Density field. For example:
@@ -1106,7 +1160,7 @@
         # the color range to the min and max values, rather than the transfer function
         # bounds.
         >>> Nc = 5
-        >>> tf.add_layers(Nc,w=0.005, col_bounds = (mi,ma), alpha=na.logspace(-2,0,Nc),
+        >>> tf.add_layers(Nc,w=0.005, col_bounds = (mi,ma), alpha=np.logspace(-2,0,Nc),
         >>>         colormap='RdBu_r')
         >>> 
         # Create the camera object. Use the keyword: no_ghost=True if a lot of time is
@@ -1164,18 +1218,18 @@
             self.nimy = 1
         if pf is not None: self.pf = pf
         
-        if rotation is None: rotation = na.eye(3)
+        if rotation is None: rotation = np.eye(3)
         self.rotation_matrix = rotation
         
-        self.normal_vector = na.array([0.,0.,1])
-        self.north_vector = na.array([1.,0.,0.])
-        self.east_vector = na.array([0.,1.,0.])
+        self.normal_vector = np.array([0.,0.,1])
+        self.north_vector = np.array([1.,0.,0.])
+        self.east_vector = np.array([0.,1.,0.])
         self.rotation_vector = self.north_vector
 
         if iterable(resolution):
             raise RuntimeError("Resolution must be a single int")
         self.resolution = resolution
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.focal_center = focal_center
         self.radius = radius
         self.fov = fov
@@ -1195,17 +1249,17 @@
 
     def get_vector_plane(self):
         if self.focal_center is not None:
-            rvec =  na.array(self.focal_center) - na.array(self.center)
+            rvec =  np.array(self.focal_center) - np.array(self.center)
             rvec /= (rvec**2).sum()**0.5
-            angle = na.arccos( (self.normal_vector*rvec).sum()/( (self.normal_vector**2).sum()**0.5 *
+            angle = np.arccos( (self.normal_vector*rvec).sum()/( (self.normal_vector**2).sum()**0.5 *
                 (rvec**2).sum()**0.5))
-            rot_vector = na.cross(rvec, self.normal_vector)
+            rot_vector = np.cross(rvec, self.normal_vector)
             rot_vector /= (rot_vector**2).sum()**0.5
             
             self.rotation_matrix = get_rotation_matrix(angle,rot_vector)
-            self.normal_vector = na.dot(self.rotation_matrix,self.normal_vector)
-            self.north_vector = na.dot(self.rotation_matrix,self.north_vector)
-            self.east_vector = na.dot(self.rotation_matrix,self.east_vector)
+            self.normal_vector = np.dot(self.rotation_matrix,self.normal_vector)
+            self.north_vector = np.dot(self.rotation_matrix,self.north_vector)
+            self.east_vector = np.dot(self.rotation_matrix,self.east_vector)
         else:
             self.focal_center = self.center + self.radius*self.normal_vector  
         dist = ((self.focal_center - self.center)**2).sum()**0.5
@@ -1228,9 +1282,9 @@
             self.get_vector_plane()
 
         nx,ny = self.resolution/self.nimx, self.resolution/self.nimy
-        image = na.zeros((nx*ny,1,3), dtype='float64', order='C')
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((nx*ny, 1, 3), dtype='float64') * self.center
+        image = np.zeros((nx*ny,1,3), dtype='float64', order='C')
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((nx*ny, 1, 3), dtype='float64') * self.center
         vector_plane = VectorPlane(positions, self.vp, self.center,
                         (0.0, 1.0, 0.0, 1.0), image, uv, uv)
         tfp = TransferFunctionProxy(self.transfer_function)
@@ -1243,15 +1297,16 @@
         total_cells = 0
         for brick in self.volume.traverse(None, self.center, image):
             brick.cast_plane(tfp, vector_plane)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         pbar.finish()
         image.shape = (nx, ny, 3)
 
         if self.image is not None:
             del self.image
+        image = ImageArray(image,
+                           info=self.get_information())
         self.image = image
-       
         return image
 
     def save_image(self, fn, clip_ratio=None):
@@ -1269,7 +1324,7 @@
         if self.image_decomp:
             if self.comm.rank == 0:
                 if self.global_comm.rank == 0:
-                    final_image = na.empty((nx*self.nimx, 
+                    final_image = np.empty((nx*self.nimx, 
                         ny*self.nimy, 3),
                         dtype='float64',order='C')
                     final_image[:nx, :ny, :] = image
@@ -1312,7 +1367,7 @@
         Examples
         --------
 
-        >>> cam.rotate(na.pi/4)
+        >>> cam.rotate(np.pi/4)
         """
         if rot_vector is None:
             rot_vector = self.north_vector
@@ -1322,9 +1377,9 @@
         R = get_rotation_matrix(theta, rot_vector)
 
         self.vp = rotate_vectors(self.vp, R)
-        self.normal_vector = na.dot(R,self.normal_vector)
-        self.north_vector = na.dot(R,self.north_vector)
-        self.east_vector = na.dot(R,self.east_vector)
+        self.normal_vector = np.dot(R,self.normal_vector)
+        self.north_vector = np.dot(R,self.north_vector)
+        self.east_vector = np.dot(R,self.east_vector)
 
         if keep_focus:
             self.center = self.focal_center - dist*self.normal_vector
@@ -1349,7 +1404,7 @@
         Examples
         --------
 
-        >>> for i, snapshot in enumerate(cam.rotation(na.pi, 10)):
+        >>> for i, snapshot in enumerate(cam.rotation(np.pi, 10)):
         ...     iw.write_bitmap(snapshot, 'rotation_%04i.png' % i)
         """
 
@@ -1381,10 +1436,10 @@
         ...     cam.save_image('move_%04i.png' % i)
         """
         if exponential:
-            position_diff = (na.array(final)/self.center)*1.0
+            position_diff = (np.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
-            dx = (na.array(final) - self.center)*1.0/n_steps
+            dx = (np.array(final) - self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
                 self.center *= dx
@@ -1426,7 +1481,7 @@
         effects of nearby cells.
     rotation : optional, 3x3 array
         If supplied, the vectors will be rotated by this.  You can construct
-        this by, for instance, calling na.array([v1,v2,v3]) where those are the
+        this by, for instance, calling np.array([v1,v2,v3]) where those are the
         three reference planes of an orthogonal frame (see ortho_find).
 
     Returns
@@ -1445,7 +1500,7 @@
     # We manually modify the ProjectionTransferFunction to get it to work the
     # way we want, with a second field that's also passed through.
     fields = [field]
-    center = na.array(center, dtype='float64')
+    center = np.array(center, dtype='float64')
     if weight is not None:
         # This is a temporary field, which we will remove at the end.
         def _make_wf(f, w):
@@ -1457,8 +1512,8 @@
             function=_make_wf(field, weight))
         fields = ["temp_weightfield", weight]
     nv = 12*nside**2
-    image = na.zeros((nv,1,3), dtype='float64', order='C')
-    vs = arr_pix2vec_nest(nside, na.arange(nv))
+    image = np.zeros((nv,1,3), dtype='float64', order='C')
+    vs = arr_pix2vec_nest(nside, np.arange(nv))
     vs.shape = (nv,1,3)
     if rotation is not None:
         vs2 = vs.copy()
@@ -1466,14 +1521,14 @@
             vs[:,:,i] = (vs2 * rotation[:,i]).sum(axis=2)
     else:
         vs += 1e-8
-    positions = na.ones((nv, 1, 3), dtype='float64', order='C') * center
+    positions = np.ones((nv, 1, 3), dtype='float64', order='C') * center
     dx = min(g.dds.min() for g in pf.h.find_point(center)[0])
     positions += inner_radius * dx * vs
     vs *= radius
-    uv = na.ones(3, dtype='float64')
+    uv = np.ones(3, dtype='float64')
     grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
-                                image, uv, uv, na.zeros(3, dtype='float64'))
+                                image, uv, uv, np.zeros(3, dtype='float64'))
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
         data = [grid[field] * grid.child_mask.astype('float64')
@@ -1502,15 +1557,15 @@
                         take_log = True, resolution=512, cmin=None, cmax=None):
     import matplotlib.figure
     import matplotlib.backends.backend_agg
-    if rotation is None: rotation = na.eye(3).astype("float64")
+    if rotation is None: rotation = np.eye(3).astype("float64")
 
     img, count = pixelize_healpix(nside, image, resolution, resolution, rotation)
 
     fig = matplotlib.figure.Figure((10, 5))
     ax = fig.add_subplot(1,1,1,projection='aitoff')
-    if take_log: func = na.log10
+    if take_log: func = np.log10
     else: func = lambda a: a
-    implot = ax.imshow(func(img), extent=(-na.pi,na.pi,-na.pi/2,na.pi/2),
+    implot = ax.imshow(func(img), extent=(-np.pi,np.pi,-np.pi/2,np.pi/2),
                        clip_on=False, aspect=0.5, vmin=cmin, vmax=cmax)
     cb = fig.colorbar(implot, orientation='horizontal')
     cb.set_label(label)
@@ -1568,12 +1623,12 @@
             pass
 
     def get_sampler_args(self, image):
-        rotp = na.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
+        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
         args = (rotp, self.box_vectors[2], self.back_center,
             (-self.width[0]/2, self.width[0]/2,
              -self.width[1]/2, self.width[1]/2),
             image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
-                na.array(self.width), self.sub_samples)
+                np.array(self.width), self.sub_samples)
         return args
 
     def finalize_image(self,image):
@@ -1607,8 +1662,8 @@
                     this_point = (self.center + width/2. * off1 * north_vector
                                          + width/2. * off2 * east_vector
                                          + width/2. * off3 * normal_vector)
-                    na.minimum(mi, this_point, mi)
-                    na.maximum(ma, this_point, ma)
+                    np.minimum(mi, this_point, mi)
+                    np.maximum(ma, this_point, ma)
         # Now we have a bounding box.
         grids = pf.h.region(self.center, mi, ma)._grids
 
@@ -1630,7 +1685,7 @@
 
     def save_image(self, fn, clip_ratio, image):
         if self.pf.field_info[self.field].take_log:
-            im = na.log10(image)
+            im = np.log10(image)
         else:
             im = image
         if self.comm.rank is 0 and fn is not None:
@@ -1656,7 +1711,9 @@
 
         self.initialize_source()
 
-        image = self._render(double_check, num_threads, image, sampler)
+        image = ImageArray(self._render(double_check, num_threads, 
+                                        image, sampler),
+                           info=self.get_information())
 
         self.save_image(fn, clip_ratio, image)
 
@@ -1667,7 +1724,8 @@
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
                         field, weight = None, 
-                        volume = None, no_ghost = False, interpolated = False):
+                        volume = None, no_ghost = False, interpolated = False,
+                        north_vector = None):
     r"""Project through a parameter file, off-axis, and return the image plane.
 
     This function will accept the necessary items to integrate through a volume
@@ -1722,12 +1780,13 @@
 
     >>> image = off_axis_projection(pf, [0.5, 0.5, 0.5], [0.2,0.3,0.4],
                       0.2, N, "Temperature", "Density")
-    >>> write_image(na.log10(image), "offaxis.png")
+    >>> write_image(np.log10(image), "offaxis.png")
 
     """
     projcam = ProjectionCamera(center, normal_vector, width, resolution,
-            field, weight=weight, pf=pf, volume=volume,
-            no_ghost=no_ghost, interpolated=interpolated)
+                               field, weight=weight, pf=pf, volume=volume,
+                               no_ghost=no_ghost, interpolated=interpolated, 
+                               north_vector=north_vector)
     image = projcam.snapshot()
     if weight is not None:
         pf.field_info.pop("temp_weightfield")
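
Note on the ImageArray change above: snapshot() now returns the rendered
buffer wrapped in an ImageArray carrying the metadata dict from
get_information().  A minimal sketch of the underlying numpy-subclass
pattern (illustrative only; TaggedImage and its fields are made-up names,
not yt's actual ImageArray):

    import numpy as np

    class TaggedImage(np.ndarray):
        # Minimal ndarray subclass that carries an "info" dict alongside
        # the pixel buffer, surviving slicing via __array_finalize__.
        def __new__(cls, input_array, info=None):
            obj = np.asarray(input_array).view(cls)
            obj.info = {} if info is None else dict(info)
            return obj
        def __array_finalize__(self, obj):
            # Called on views and slices; propagate the metadata dict.
            self.info = getattr(obj, 'info', {})

    img = TaggedImage(np.zeros((4, 4, 3)), info={'field': 'Density'})
    assert img[1:3, 1:3].info['field'] == 'Density'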


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/volume_rendering/camera_path.py
--- a/yt/visualization/volume_rendering/camera_path.py
+++ b/yt/visualization/volume_rendering/camera_path.py
@@ -24,7 +24,7 @@
 """
 
 import random
-import numpy as na
+import numpy as np
 from .create_spline import create_spline
 
 class Keyframes(object):
@@ -67,12 +67,12 @@
         Examples
         --------
 
-        >>> import numpy as na
+        >>> import numpy as np
         >>> import matplotlib.pyplot as plt
         >>> from yt.visualization.volume_rendering.camera_path import *
 
         # Make a camera path from 10 random (x,y,z) keyframes
-        >>> data = na.random.random.((10,3))
+        >>> data = np.random.random((10,3))
         >>> kf = Keyframes(data[:,0], data[:,1], data[:,2])
         >>> path = kf.create_path(250, shortest_path=False)
 
@@ -93,7 +93,7 @@
             print "Need Nx (%d) == Ny (%d) == Nz (%d)" % (Nx, Ny, Nz)
             sys.exit()
         self.nframes = Nx
-        self.pos = na.zeros((Nx,3))
+        self.pos = np.zeros((Nx,3))
         self.pos[:,0] = x
         self.pos[:,1] = y
         if z != None:
@@ -103,7 +103,7 @@
         self.north_vectors = north_vectors
         self.up_vectors = up_vectors
         if times == None:
-            self.times = na.arange(self.nframes)
+            self.times = np.arange(self.nframes)
         else:
             self.times = times
         self.cartesian_matrix()
@@ -131,7 +131,7 @@
         """
         # randomize tour
         self.tour = range(self.nframes)
-        na.random.shuffle(self.tour)
+        np.random.shuffle(self.tour)
         if fixed_start:
             first = self.tour.index(0)
             self.tour[0], self.tour[first] = self.tour[first], self.tour[0]
@@ -191,17 +191,17 @@
         Create a distance matrix for the city coords that uses
         straight line distance
         """
-        self.dist_matrix = na.zeros((self.nframes, self.nframes))
-        xmat = na.zeros((self.nframes, self.nframes))
+        self.dist_matrix = np.zeros((self.nframes, self.nframes))
+        xmat = np.zeros((self.nframes, self.nframes))
         xmat[:,:] = self.pos[:,0]
         dx = xmat - xmat.T
-        ymat = na.zeros((self.nframes, self.nframes))
+        ymat = np.zeros((self.nframes, self.nframes))
         ymat[:,:] = self.pos[:,1]
         dy = ymat - ymat.T
-        zmat = na.zeros((self.nframes, self.nframes))
+        zmat = np.zeros((self.nframes, self.nframes))
         zmat[:,:] = self.pos[:,2]
         dz = zmat - zmat.T
-        self.dist_matrix = na.sqrt(dx*dx + dy*dy + dz*dz)
+        self.dist_matrix = np.sqrt(dx*dx + dy*dy + dz*dz)
 
     def tour_length(self, tour):
         r"""
@@ -227,7 +227,7 @@
         if next > prev:
             return 1.0
         else:
-            return na.exp( -abs(next-prev) / temperature )
+            return np.exp( -abs(next-prev) / temperature )
 
     def get_shortest_path(self):
         r"""Determine shortest path between all keyframes.
@@ -294,14 +294,14 @@
             path.  Also saved to self.path.
         """
         self.npoints = npoints
-        self.path = {"time": na.zeros(npoints),
-                     "position": na.zeros((npoints, 3)),
-                     "north_vectors": na.zeros((npoints,3)),
-                     "up_vectors": na.zeros((npoints,3))}
+        self.path = {"time": np.zeros(npoints),
+                     "position": np.zeros((npoints, 3)),
+                     "north_vectors": np.zeros((npoints,3)),
+                     "up_vectors": np.zeros((npoints,3))}
         if shortest_path:
             self.get_shortest_path()
         if path_time == None:
-            path_time = na.linspace(0, self.nframes, npoints)
+            path_time = np.linspace(0, self.nframes, npoints)
         self.path["time"] = path_time
         for dim in range(3):
             self.path["position"][:,dim] = create_spline(self.times, self.pos[:,dim],
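
For orientation on the get_shortest_path()/accept_prob() machinery above:
it is a simulated-annealing tour shortener.  A generic Metropolis-style
loop for the same idea might look like the sketch below (an illustration
under standard annealing assumptions, not the code in camera_path.py;
anneal, the segment-reversal move, and the cooling schedule are all
hypothetical):

    import random
    import numpy as np

    def anneal(tour, length, temperature=1.0, cooling=0.995, steps=5000):
        # Always accept an improvement; accept a worse tour with
        # probability exp(-delta / T); cool T geometrically.
        best = list(tour)
        for _ in range(steps):
            i, j = sorted(random.sample(range(len(best)), 2))
            trial = best[:i] + best[i:j+1][::-1] + best[j+1:]
            delta = length(trial) - length(best)
            if delta < 0 or random.random() < np.exp(-delta / temperature):
                best = trial
            temperature *= cooling
        return best

    # Example: shorten a tour over twenty 1-D "cities".
    cities = np.random.random(20)
    tour_len = lambda t: float(np.sum(np.abs(np.diff(cities[np.array(t)]))))
    short = anneal(list(range(20)), tour_len)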


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/volume_rendering/create_spline.py
--- a/yt/visualization/volume_rendering/create_spline.py
+++ b/yt/visualization/volume_rendering/create_spline.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def create_spline(old_x, old_y, new_x, tension=0.5, sorted=False):
     """
@@ -45,18 +45,18 @@
     """
     ndata = len(old_x)
     N = len(new_x)
-    result = na.zeros(N)
+    result = np.zeros(N)
     if not sorted:
-        isort = na.argsort(old_x)
+        isort = np.argsort(old_x)
         old_x = old_x[isort]
         old_y = old_y[isort]
     # Floor/ceiling of values outside of the original data
-    new_x = na.minimum(new_x, old_x[-1])
-    new_x = na.maximum(new_x, old_x[0])
-    ind = na.searchsorted(old_x, new_x)
-    im2 = na.maximum(ind-2, 0)
-    im1 = na.maximum(ind-1, 0)
-    ip1 = na.minimum(ind+1, ndata-1)
+    new_x = np.minimum(new_x, old_x[-1])
+    new_x = np.maximum(new_x, old_x[0])
+    ind = np.searchsorted(old_x, new_x)
+    im2 = np.maximum(ind-2, 0)
+    im1 = np.maximum(ind-1, 0)
+    ip1 = np.minimum(ind+1, ndata-1)
     for i in range(N):
         if ind[i] != im1[i]:
             u = (new_x[i] - old_x[im1[i]]) / (old_x[ind[i]] - old_x[im1[i]])
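
A hypothetical usage of create_spline() as defined above: resampling
irregular samples onto a finer grid, with out-of-range query points
clamped to the endpoints by the np.minimum/np.maximum calls:

    import numpy as np
    from yt.visualization.volume_rendering.create_spline import create_spline

    # Ten irregular samples of a smooth curve, resampled onto 100 uniform
    # points; new_x deliberately overshoots the sampled range to show the
    # endpoint clamping.
    old_x = np.sort(np.random.random(10))
    old_y = np.sin(2 * np.pi * old_x)
    new_x = np.linspace(-0.1, 1.1, 100)
    new_y = create_spline(old_x, old_y, new_x, tension=0.5)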


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/volume_rendering/grid_partitioner.py
--- a/yt/visualization/volume_rendering/grid_partitioner.py
+++ b/yt/visualization/volume_rendering/grid_partitioner.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 import h5py
 
@@ -63,10 +63,10 @@
                    len(self.bricks), back_point, front_point)
         if self.bricks is None: self.initialize_source()
         vec = front_point - back_point
-        dist = na.minimum(
-             na.sum((self.brick_left_edges - back_point) * vec, axis=1),
-             na.sum((self.brick_right_edges - back_point) * vec, axis=1))
-        ind = na.argsort(dist)
+        dist = np.minimum(
+             np.sum((self.brick_left_edges - back_point) * vec, axis=1),
+             np.sum((self.brick_right_edges - back_point) * vec, axis=1))
+        ind = np.argsort(dist)
         for b in self.bricks[ind]:
             #print b.LeftEdge, b.RightEdge
             yield b
@@ -79,7 +79,7 @@
         for field, log_field in zip(self.fields, self.log_fields):
             vcd = grid.get_vertex_centered_data(field, no_ghost = self.no_ghost)
             vcd = vcd.astype("float64")
-            if log_field: vcd = na.log10(vcd)
+            if log_field: vcd = np.log10(vcd)
             vcds.append(vcd)
 
         GF = GridFaces(grid.Children + [grid])
@@ -121,11 +121,11 @@
         # intersection, we only need to do the left edge & right edge.
         #
         # We're going to double up a little bit here in memory.
-        self.brick_left_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_parents = na.zeros( NB, dtype='int64')
-        self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
-        self.bricks = na.empty(len(bricks), dtype='object')
+        self.brick_left_edges = np.zeros( (NB, 3), dtype='float64')
+        self.brick_right_edges = np.zeros( (NB, 3), dtype='float64')
+        self.brick_parents = np.zeros( NB, dtype='int64')
+        self.brick_dimensions = np.zeros( (NB, 3), dtype='int64')
+        self.bricks = np.empty(len(bricks), dtype='object')
         for i,b in enumerate(bricks):
             self.brick_left_edges[i,:] = b.LeftEdge
             self.brick_right_edges[i,:] = b.RightEdge
@@ -143,12 +143,12 @@
             for j in [-1, 1]:
                 for k in [-1, 1]:
                     for b in self.bricks:
-                        BB = na.array([b.LeftEdge * [i,j,k], b.RightEdge * [i,j,k]])
-                        LE, RE = na.min(BB, axis=0), na.max(BB, axis=0)
+                        BB = np.array([b.LeftEdge * [i,j,k], b.RightEdge * [i,j,k]])
+                        LE, RE = np.min(BB, axis=0), np.max(BB, axis=0)
                         nb.append(
                             PartitionedGrid(b.parent_grid_id, len(b.my_data), 
                                 [md[::i,::j,::k].copy("C") for md in b.my_data],
-                                LE, RE, na.array(b.my_data[0].shape) - 1))
+                                LE, RE, np.array(b.my_data[0].shape) - 1))
         # Replace old bricks
         self.initialize_bricks(nb)
 
@@ -183,7 +183,7 @@
                                 self.brick_right_edges[i,:],
                                 self.brick_dimensions[i,:],
                                 ))
-        self.bricks = na.array(bricks, dtype='object')
+        self.bricks = np.array(bricks, dtype='object')
         f.close()
 
     def reset_cast(self):
@@ -194,10 +194,10 @@
     def __init__(self, data_array):
         self.bricks = [PartitionedGrid(-1, 1, 
                        [data_array.astype("float64")],
-                       na.zeros(3, dtype='float64'),
-                       na.ones(3, dtype='float64'),
-                       na.array(data_array.shape, dtype='int64')-1)]
-        self.brick_dimensions = na.ones((1, 3), dtype='int64')*data_array.shape
+                       np.zeros(3, dtype='float64'),
+                       np.ones(3, dtype='float64'),
+                       np.array(data_array.shape, dtype='int64')-1)]
+        self.brick_dimensions = np.ones((1, 3), dtype='int64')*data_array.shape
 
     def initialize_source(self):
         pass
@@ -221,24 +221,24 @@
     def __getitem__(self, item):
         return self.faces[item]
 
-def export_partitioned_grids(grid_list, fn, int_type=na.int64, float_type=na.float64):
+def export_partitioned_grids(grid_list, fn, int_type=np.int64, float_type=np.float64):
     f = h5py.File(fn, "w")
     pbar = get_pbar("Writing Grids", len(grid_list))
     nelem = sum((grid.my_data.size for grid in grid_list))
     ngrids = len(grid_list)
     group = f.create_group("/PGrids")
-    left_edge = na.concatenate([[grid.LeftEdge,] for grid in grid_list])
+    left_edge = np.concatenate([[grid.LeftEdge,] for grid in grid_list])
     f.create_dataset("/PGrids/LeftEdges", data=left_edge, dtype=float_type); del left_edge
-    right_edge = na.concatenate([[grid.RightEdge,] for grid in grid_list])
+    right_edge = np.concatenate([[grid.RightEdge,] for grid in grid_list])
     f.create_dataset("/PGrids/RightEdges", data=right_edge, dtype=float_type); del right_edge
-    dims = na.concatenate([[grid.my_data.shape[:],] for grid in grid_list])
+    dims = np.concatenate([[grid.my_data.shape[:],] for grid in grid_list])
     f.create_dataset("/PGrids/Dims", data=dims, dtype=int_type); del dims
-    data = na.concatenate([grid.my_data.ravel() for grid in grid_list])
+    data = np.concatenate([grid.my_data.ravel() for grid in grid_list])
     f.create_dataset("/PGrids/Data", data=data, dtype=float_type); del data
     f.close()
     pbar.finish()
 
-def import_partitioned_grids(fn, int_type=na.int64, float_type=na.float64):
+def import_partitioned_grids(fn, int_type=np.int64, float_type=np.float64):
     f = h5py.File(fn, "r")
     n_groups = len(f)
     grid_list = []
@@ -258,4 +258,4 @@
         pbar.update(i)
     pbar.finish()
     f.close()
-    return na.array(grid_list, dtype='object')
+    return np.array(grid_list, dtype='object')
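
The ordering trick in traverse() above, pulled out as a standalone sketch:
project each brick's edges onto the traversal direction and visit bricks
in order of that distance, so compositing happens back to front (the
function name is illustrative):

    import numpy as np

    def order_bricks(left_edges, right_edges, back_point, front_point):
        # left_edges/right_edges have shape (N, 3); the points shape (3,).
        vec = front_point - back_point
        dist = np.minimum(
            np.sum((left_edges - back_point) * vec, axis=1),
            np.sum((right_edges - back_point) * vec, axis=1))
        # Indices of bricks, nearest the back point first.
        return np.argsort(dist)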


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/volume_rendering/image_handling.py
--- a/yt/visualization/volume_rendering/image_handling.py
+++ b/yt/visualization/volume_rendering/image_handling.py
@@ -25,7 +25,7 @@
 import h5py
 try: import pyfits
 except: pass
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -67,7 +67,7 @@
         f.close()
     else:
         print 'No support for fits import.'
-    return na.array([r,g,b,a]).swapaxes(0,2).swapaxes(0,1)
+    return np.array([r,g,b,a]).swapaxes(0,2).swapaxes(0,1)
 
 def plot_channel(image, name, cmap='gist_heat', log=True, dex=3, zero_factor=1.0e-10, 
                  label=None, label_color='w', label_size='large'):
@@ -84,7 +84,7 @@
     import matplotlib
     import pylab
     Nvec = image.shape[0]
-    image[na.isnan(image)] = 0.0
+    image[np.isnan(image)] = 0.0
     ma = image[image>0.0].max()
     image[image==0.0] = ma*zero_factor
     if log:
@@ -113,7 +113,7 @@
     """
     import pylab
     Nvec = image.shape[0]
-    image[na.isnan(image)] = 0.0
+    image[np.isnan(image)] = 0.0
     if image.shape[2] >= 4:
         image = image[:,:,:3]
     pylab.clf()
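
The NaN handling above is a small, reusable pattern for log-scaled image
plots; as a standalone sketch (scrub_for_log is a made-up name, and like
the original it assumes at least one positive pixel):

    import numpy as np

    def scrub_for_log(image, zero_factor=1.0e-10):
        # Zero out NaNs, then lift exact zeros to a small fraction of the
        # maximum so log10 stays finite everywhere.
        image = image.copy()
        image[np.isnan(image)] = 0.0
        ma = image[image > 0.0].max()
        image[image == 0.0] = ma * zero_factor
        return np.log10(image)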


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/volume_rendering/multi_texture.py
--- a/yt/visualization/volume_rendering/multi_texture.py
+++ b/yt/visualization/volume_rendering/multi_texture.py
@@ -261,7 +261,7 @@
         tex_coord.Append((t1,t0,t1)); ver_coord.Append((x1, y0, z1)) # 7
         
         # Store quads
-        self._quads[tex_id] = (tex_coord, ver_coord, na.array(indices,dtype=na.uint8))
+        self._quads[tex_id] = (tex_coord, ver_coord, np.array(indices,dtype=np.uint8))
 
 def visvis_plot(vp):
     """
@@ -280,10 +280,10 @@
     ax = vv.gca()
 
     for i,g in enumerate(gs):
-        ss = ((g.RightEdge - g.LeftEdge) / (na.array(g.my_data[0].shape)-1)).tolist()
+        ss = ((g.RightEdge - g.LeftEdge) / (np.array(g.my_data[0].shape)-1)).tolist()
         origin = g.LeftEdge.astype("float32").tolist()
         dd = (g.my_data[0].astype("float32") - mi)/(ma - mi)
-        dd = na.clip(dd, 0.0, 1.0)
+        dd = np.clip(dd, 0.0, 1.0)
         print ss
         texes.append(vv.Aarray(dd, origin = origin, sampling = ss))
 


diff -r e3f22fd4ecbb72a614d1e6abcd001d3d1760efd6 -r 33a1976b9deef39ea2844784c78cdd03ef798af4 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from matplotlib.cm import get_cmap
 
 from yt.funcs import *
@@ -59,10 +59,10 @@
         self.pass_through = 0
         self.nbins = nbins
         self.x_bounds = x_bounds
-        self.x = na.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
-        self.y = na.zeros(nbins, dtype='float64')
+        self.x = np.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
+        self.y = np.zeros(nbins, dtype='float64')
         self.grad_field = -1
-        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
+        self.light_source_v = self.light_source_c = np.zeros(3, 'float64')
 
     def add_gaussian(self, location, width, height):
         r"""Add a Gaussian distribution to the transfer function.
@@ -88,8 +88,8 @@
         >>> tf = TransferFunction( (-10.0, -5.0) )
         >>> tf.add_gaussian(-9.0, 0.01, 1.0)
         """
-        vals = height * na.exp(-(self.x - location)**2.0/width)
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        vals = height * np.exp(-(self.x - location)**2.0/width)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_line(self, start, stop):
         r"""Add a line between two points to the transmission function.
@@ -122,7 +122,7 @@
         # not satisfy our bounding box arguments
         vals = slope * (self.x - x0) + y0
         vals[~((self.x >= x0) & (self.x <= x1))] = 0.0
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_step(self, start, stop, value):
         r"""Adds a step function to the transfer function.
@@ -154,12 +154,12 @@
         >>> tf.add_gaussian(-7.0, 0.01, 1.0)
         >>> tf.add_step(-8.0, -6.0, 0.5)
         """
-        vals = na.zeros(self.x.shape, 'float64')
+        vals = np.zeros(self.x.shape, 'float64')
         vals[(self.x >= start) & (self.x <= stop)] = value
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_filtered_planck(self, wavelength, trans):
-        vals = na.zeros(self.x.shape, 'float64')
+        vals = np.zeros(self.x.shape, 'float64')
         nu = clight/(wavelength*1e-8)
         nu = nu[::-1]
 
@@ -167,15 +167,15 @@
             T = 10**logT
             # Black body at this nu, T
             Bnu = ((2.0 * hcgs * nu**3) / clight**2.0) / \
-                    (na.exp(hcgs * nu / (kboltz * T)) - 1.0)
+                    (np.exp(hcgs * nu / (kboltz * T)) - 1.0)
             # transmission
             f = Bnu * trans[::-1]
             # integrate transmission over nu
-            vals[i] = na.trapz(f,nu)
+            vals[i] = np.trapz(f,nu)
 
         # normalize by total transmission over filter
-        self.y = vals/trans.sum() #/na.trapz(trans[::-1],nu)
-        #self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
+        self.y = vals/trans.sum() #/np.trapz(trans[::-1],nu)
+        #self.y = np.clip(np.maximum(vals, self.y), 0.0, 1.0)
 
     def plot(self, filename):
         r"""Save an image file of the transfer function.
@@ -245,7 +245,7 @@
         self.field_table_ids = [0] * 6
         self.weight_table_ids = [-1] * 6
         self.grad_field = -1
-        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
+        self.light_source_v = self.light_source_c = np.zeros(3, 'float64')
 
     def add_field_table(self, table, field_id, weight_field_id = -1,
                         weight_table_id = -1):
@@ -459,20 +459,20 @@
         from matplotlib.ticker import FuncFormatter
         pyplot.clf()
         ax = pyplot.axes()
-        i_data = na.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
-        i_data[:,:,0] = na.outer(na.ones(self.alpha.x.size), self.funcs[0].y)
-        i_data[:,:,1] = na.outer(na.ones(self.alpha.x.size), self.funcs[1].y)
-        i_data[:,:,2] = na.outer(na.ones(self.alpha.x.size), self.funcs[2].y)
+        i_data = np.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
+        i_data[:,:,0] = np.outer(np.ones(self.alpha.x.size), self.funcs[0].y)
+        i_data[:,:,1] = np.outer(np.ones(self.alpha.x.size), self.funcs[1].y)
+        i_data[:,:,2] = np.outer(np.ones(self.alpha.x.size), self.funcs[2].y)
         ax.imshow(i_data, origin='lower')
-        ax.fill_between(na.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
+        ax.fill_between(np.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
         ax.set_xlim(0, self.alpha.x.size)
-        xticks = na.arange(na.ceil(self.alpha.x[0]), na.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
+        xticks = np.arange(np.ceil(self.alpha.x[0]), np.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
         xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
         ax.xaxis.set_ticks(xticks)
         def x_format(x, pos):
             return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
         ax.xaxis.set_major_formatter(FuncFormatter(x_format))
-        yticks = na.linspace(0,1,5) * self.alpha.y.size
+        yticks = np.linspace(0,1,5) * self.alpha.y.size
         ax.yaxis.set_ticks(yticks)
         def y_format(y, pos):
             return (y / self.alpha.y.size)
@@ -500,20 +500,20 @@
         from matplotlib.ticker import FuncFormatter
         pyplot.clf()
         ax = pyplot.axes()
-        i_data = na.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
-        i_data[:,:,0] = na.outer(na.ones(self.alpha.x.size), self.funcs[0].y)
-        i_data[:,:,1] = na.outer(na.ones(self.alpha.x.size), self.funcs[1].y)
-        i_data[:,:,2] = na.outer(na.ones(self.alpha.x.size), self.funcs[2].y)
+        i_data = np.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
+        i_data[:,:,0] = np.outer(np.ones(self.alpha.x.size), self.funcs[0].y)
+        i_data[:,:,1] = np.outer(np.ones(self.alpha.x.size), self.funcs[1].y)
+        i_data[:,:,2] = np.outer(np.ones(self.alpha.x.size), self.funcs[2].y)
         ax.imshow(i_data, origin='lower')
-        ax.fill_between(na.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
+        ax.fill_between(np.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
         ax.set_xlim(0, self.alpha.x.size)
-        xticks = na.arange(na.ceil(self.alpha.x[0]), na.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
+        xticks = np.arange(np.ceil(self.alpha.x[0]), np.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
         xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
         ax.xaxis.set_ticks(xticks)
         def x_format(x, pos):
             return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
         ax.xaxis.set_major_formatter(FuncFormatter(x_format))
-        yticks = na.linspace(0,1,5) * self.alpha.y.size
+        yticks = np.linspace(0,1,5) * self.alpha.y.size
         ax.yaxis.set_ticks(yticks)
         def y_format(y, pos):
             return (y / self.alpha.y.size)
@@ -574,7 +574,7 @@
             self.x_bounds[0]))
         rel1 = int(self.nbins*(ma - self.x_bounds[0])/(self.x_bounds[1] -
             self.x_bounds[0]))
-        tomap = na.linspace(0.,1.,num=rel1-rel0)
+        tomap = np.linspace(0.,1.,num=rel1-rel0)
         cmap = get_cmap(colormap)
         cc = cmap(tomap)*scale
         if scale_func is None:
@@ -640,17 +640,17 @@
             if ma is None: ma = col_bounds[1] - dist/(10.0*N)
         if w is None: w = 0.001 * (ma-mi)/N
         if alpha is None and self.grey_opacity:
-            alpha = na.ones(N, dtype="float64")
+            alpha = np.ones(N, dtype="float64")
         elif alpha is None and not self.grey_opacity:
-            alpha = na.logspace(-3, 0, N)
-        for v, a in zip(na.mgrid[mi:ma:N*1j], alpha):
+            alpha = np.logspace(-3, 0, N)
+        for v, a in zip(np.mgrid[mi:ma:N*1j], alpha):
             self.sample_colormap(v, w, a, colormap=colormap, col_bounds=col_bounds)
 
     def get_colormap_image(self, height, width):
-        image = na.zeros((height, width, 3), dtype='uint8')
-        hvals = na.mgrid[self.x_bounds[0]:self.x_bounds[1]:height * 1j]
+        image = np.zeros((height, width, 3), dtype='uint8')
+        hvals = np.mgrid[self.x_bounds[0]:self.x_bounds[1]:height * 1j]
         for i,f in enumerate(self.funcs[:3]):
-            vals = na.interp(hvals, f.x, f.y)
+            vals = np.interp(hvals, f.x, f.y)
             image[:,:,i] = (vals[:,None] * 255).astype('uint8')
         image = image[::-1,:,:]
         return image
@@ -736,7 +736,7 @@
         self._normalize()
 
     def _normalize(self):
-        fmax  = na.array([f.y for f in self.tables[:3]])
+        fmax  = np.array([f.y for f in self.tables[:3]])
         normal = fmax.max(axis=0)
         for f in self.tables[:3]:
             f.y = f.y/normal
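
The add_gaussian() compositing above reduces to a few lines of numpy; a
standalone sketch over an explicit sample grid (note that width divides
the squared offset directly, so it plays the role of 2*sigma**2):

    import numpy as np

    # Fixed sample grid for the transfer function, as in self.x above.
    x = np.linspace(-10.0, -5.0, 256)
    y = np.zeros_like(x)
    # Fold each bump into the curve with an elementwise maximum.
    for location, width, height in [(-9.0, 0.01, 1.0), (-7.0, 0.05, 0.5)]:
        vals = height * np.exp(-(x - location)**2.0 / width)
        y = np.clip(np.maximum(vals, y), 0.0, np.inf)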



https://bitbucket.org/yt_analysis/yt-3.0/changeset/34145571fc0b/
changeset:   34145571fc0b
branch:      yt
user:        MatthewTurk
date:        2012-10-17 22:30:36
summary:     Fixing some includes in the manifest
affected #:  1 file

diff -r ff017cd2c1a8a85e5894d78b8bbf5745a84dc959 -r 34145571fc0b06761f18e71db1ea48046609ff17 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
-include distribute_setup.py
+include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.hh *.h README* CREDITS FUNDING LICENSE
+recursive-include yt *.pyx *.pxd *.hh *.h README*



https://bitbucket.org/yt_analysis/yt-3.0/changeset/883eb7631b46/
changeset:   883eb7631b46
branch:      yt
user:        MatthewTurk
date:        2012-10-17 22:51:46
summary:     Merge
affected #:  3 files

diff -r 34145571fc0b06761f18e71db1ea48046609ff17 -r 883eb7631b460d62733b6ffe9996a17dbf5c03cd yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -718,7 +718,9 @@
             Default=True.
         njobs : int
             The number of jobs over which to split the projections.  Set
-            to -1 so that each halo is done by a single processor.
+            to -1 so that each halo is done by a single processor.  Halo 
+            projections do not currently work in parallel, so this must 
+            be set to -1.
             Default: -1.
         dynamic : bool
             If True, distribute halos using a task queue.  If False,
@@ -732,6 +734,12 @@
 
         """
 
+        # Halo projections cannot run in parallel because they are done by 
+        # giving a data source to the projection object.
+        if njobs > 0:
+            mylog.warn("Halo projections cannot use more than one processor per halo, setting njobs to -1.")
+            njobs = -1
+        
         # Get list of halos for projecting.
         if halo_list == 'filtered':
             halo_projection_list = self.filtered_halos


diff -r 34145571fc0b06761f18e71db1ea48046609ff17 -r 883eb7631b460d62733b6ffe9996a17dbf5c03cd yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -237,6 +237,7 @@
     def __set_default_field_parameters(self):
         self.set_field_parameter("center",np.zeros(3,dtype='float64'))
         self.set_field_parameter("bulk_velocity",np.zeros(3,dtype='float64'))
+        self.set_field_parameter("normal",np.array([0,0,1],dtype='float64'))
 
     def _set_center(self, center):
         if center is None:


diff -r 34145571fc0b06761f18e71db1ea48046609ff17 -r 883eb7631b460d62733b6ffe9996a17dbf5c03cd yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -876,8 +876,6 @@
 
 def _RadialVelocity(field, data):
     normal = data.get_field_parameter("normal")
-    if normal == None:
-        normal = [0,0,1]
     velocities = obtain_rv_vec(data).transpose()    
     theta = np.tile(data['sph_theta'], (3, 1)).transpose()
     phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
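
Context for the data_containers.py and universal_fields.py hunks above:
registering a default "normal" on the container is what lets
_RadialVelocity drop its None check.  The pattern, as a minimal
illustrative class (not the real YTDataContainer):

    import numpy as np

    class FieldParameters(object):
        def __init__(self):
            self._params = {}
            # Defaults registered once, so field functions can call
            # get_field_parameter() without per-field None checks.
            self.set_field_parameter("center", np.zeros(3, dtype='float64'))
            self.set_field_parameter("bulk_velocity", np.zeros(3, dtype='float64'))
            self.set_field_parameter("normal", np.array([0, 0, 1], dtype='float64'))
        def set_field_parameter(self, name, value):
            self._params[name] = value
        def get_field_parameter(self, name, default=None):
            return self._params.get(name, default)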



https://bitbucket.org/yt_analysis/yt-3.0/changeset/12b9b286e3e1/
changeset:   12b9b286e3e1
branch:      yt
user:        brittonsmith
date:        2012-10-18 15:53:30
summary:     Adding tests for rays and ortho rays.
affected #:  2 files

diff -r 75eab6836b445eb7fe75017d761b06ae63172310 -r 12b9b286e3e1eb752f70759a5b939dc36b0bb327 yt/data_objects/tests/test_ortho_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -0,0 +1,25 @@
+from yt.testing import *
+
+def test_ortho_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
+
+    axes = ['x', 'y', 'z']
+    for ax, an in enumerate(axes):
+        ocoord = np.random.random(2)
+
+        my_oray = pf.h.ortho_ray(ax, ocoord)
+
+        my_axes = range(3)
+        del my_axes[ax]
+
+        # find the cells intersected by the ortho ray
+        my_all = pf.h.all_data()
+        my_cells = (np.abs(my_all[axes[my_axes[0]]] - ocoord[0]) <= 
+                    0.5 * dx[my_axes[0]]) & \
+                   (np.abs(my_all[axes[my_axes[1]]] - ocoord[1]) <= 
+                    0.5 * dx[my_axes[1]])
+
+        assert_equal(my_oray['Density'].sum(),
+                     my_all['Density'][my_cells].sum())


diff -r 75eab6836b445eb7fe75017d761b06ae63172310 -r 12b9b286e3e1eb752f70759a5b939dc36b0bb327 yt/data_objects/tests/test_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_rays.py
@@ -0,0 +1,31 @@
+from yt.testing import *
+
+def test_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+      pf.domain_dimensions
+
+    p1 = np.random.random(3)
+    p2 = np.random.random(3)
+
+    my_ray = pf.h.ray(p1, p2)
+    assert_equal(my_ray['dts'].sum(), 1.0)
+    ray_cells = my_ray['dts'] > 0
+
+    # find cells intersected by the ray
+    my_all = pf.h.all_data()
+    
+    dt = np.abs(dx / (p2 - p1))
+    tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
+                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
+                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
+    tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
+                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
+                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
+    tin = tin.max(axis=0)
+    tout = tout.min(axis=0)
+    my_cells = (tin < tout) & (tin < 1) & (tout > 0)
+
+    assert_equal(ray_cells.sum(), my_cells.sum())
+    assert_equal(my_ray['Density'][ray_cells].sum(),
+                 my_all['Density'][my_cells].sum())           
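
The tin/tout construction in test_ray() above is the standard slab method
for ray-box intersection.  Distilled to a single cell (ray_hits_cell is a
hypothetical helper; like the test, it assumes the ray is not
axis-parallel, so no component of d is zero):

    import numpy as np

    def ray_hits_cell(p1, p2, cell_lo, cell_hi):
        # Parametric entry/exit times of the ray p1 + t*(p2 - p1) for each
        # axis-aligned slab; the ray hits the cell iff the latest entry
        # precedes the earliest exit, within the segment t in [0, 1].
        d = p2 - p1
        t1 = (cell_lo - p1) / d
        t2 = (cell_hi - p1) / d
        tin = np.minimum(t1, t2).max()
        tout = np.maximum(t1, t2).min()
        return (tin < tout) and (tin < 1) and (tout > 0)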



https://bitbucket.org/yt_analysis/yt-3.0/changeset/06462f53a16b/
changeset:   06462f53a16b
branch:      yt
user:        MatthewTurk
date:        2012-10-18 17:41:17
summary:     Merged in brittonsmith/yt (pull request #305)
affected #:  2 files

diff -r 883eb7631b460d62733b6ffe9996a17dbf5c03cd -r 06462f53a16bbbb175a88920ed0c219aacbdc4d9 yt/data_objects/tests/test_ortho_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -0,0 +1,25 @@
+from yt.testing import *
+
+def test_ortho_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
+
+    axes = ['x', 'y', 'z']
+    for ax, an in enumerate(axes):
+        ocoord = np.random.random(2)
+
+        my_oray = pf.h.ortho_ray(ax, ocoord)
+
+        my_axes = range(3)
+        del my_axes[ax]
+
+        # find the cells intersected by the ortho ray
+        my_all = pf.h.all_data()
+        my_cells = (np.abs(my_all[axes[my_axes[0]]] - ocoord[0]) <= 
+                    0.5 * dx[my_axes[0]]) & \
+                   (np.abs(my_all[axes[my_axes[1]]] - ocoord[1]) <= 
+                    0.5 * dx[my_axes[1]])
+
+        assert_equal(my_oray['Density'].sum(),
+                     my_all['Density'][my_cells].sum())


diff -r 883eb7631b460d62733b6ffe9996a17dbf5c03cd -r 06462f53a16bbbb175a88920ed0c219aacbdc4d9 yt/data_objects/tests/test_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_rays.py
@@ -0,0 +1,31 @@
+from yt.testing import *
+
+def test_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+      pf.domain_dimensions
+
+    p1 = np.random.random(3)
+    p2 = np.random.random(3)
+
+    my_ray = pf.h.ray(p1, p2)
+    assert_equal(my_ray['dts'].sum(), 1.0)
+    ray_cells = my_ray['dts'] > 0
+
+    # find cells intersected by the ray
+    my_all = pf.h.all_data()
+    
+    dt = np.abs(dx / (p2 - p1))
+    tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
+                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
+                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
+    tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
+                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
+                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
+    tin = tin.max(axis=0)
+    tout = tout.min(axis=0)
+    my_cells = (tin < tout) & (tin < 1) & (tout > 0)
+
+    assert_equal(ray_cells.sum(), my_cells.sum())
+    assert_equal(my_ray['Density'][ray_cells].sum(),
+                 my_all['Density'][my_cells].sum())           



https://bitbucket.org/yt_analysis/yt-3.0/changeset/ae6617541438/
changeset:   ae6617541438
branch:      yt
user:        MatthewTurk
date:        2012-10-18 19:13:32
summary:     Adding a routine to test fields.  However, making the parameter
file "realistic" reveals that some fields are probably misplaced as
"universal."  I've also updated a few fields that rely on cosmological
parameters to use the attributes rather than dict-style access.


The test_fields.py routine should eventually be extended to handle particle
fields, but fake_random_pf does not yet do that.

Additionally, many of the tests currently fail because of buffer size
mismatches in the coordinate transformations.
affected #:  3 files

diff -r 06462f53a16bbbb175a88920ed0c219aacbdc4d9 -r ae66175414388a369d488d58fab308f84e80203b yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -160,7 +160,8 @@
             # required attrs
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
-                pf.hubble_constant = pf.cosmological_simulation = 0.0
+                pf.cosmological_simulation = 0.0
+            pf.hubble_constant = 0.7
             pf.domain_left_edge = np.zeros(3, 'float64')
             pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3


diff -r 06462f53a16bbbb175a88920ed0c219aacbdc4d9 -r ae66175414388a369d488d58fab308f84e80203b yt/data_objects/tests/test_fields.py
--- /dev/null
+++ b/yt/data_objects/tests/test_fields.py
@@ -0,0 +1,90 @@
+from yt.testing import *
+import numpy as np
+from yt.data_objects.field_info_container import \
+    FieldInfo
+import yt.data_objects.universal_fields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    np.seterr(all = 'ignore')
+
+_sample_parameters = dict(
+    axis = 0,
+    center = np.array((0.0, 0.0, 0.0)),
+    bulk_velocity = np.array((0.0, 0.0, 0.0)),
+    normal = np.array((0.0, 0.0, 1.0)),
+    cp_x_vec = np.array((1.0, 0.0, 0.0)),
+    cp_y_vec = np.array((0.0, 1.0, 0.0)),
+    cp_z_vec = np.array((0.0, 0.0, 1.0)),
+)
+
+_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
+
+def realistic_pf(fields, nprocs):
+    pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
+    pf.parameters["HydroMethod"] = "streaming"
+    pf.parameters["Gamma"] = 5.0/3.0
+    pf.parameters["EOSType"] = 1.0
+    pf.parameters["EOSSoundSpeed"] = 1.0
+    pf.conversion_factors["Time"] = 1.0
+    pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
+    pf.current_redshift = 0.0001
+    pf.hubble_constant = 0.7
+    for unit in mpc_conversion:
+        pf.units[unit+'h'] = pf.units[unit]
+        pf.units[unit+'cm'] = pf.units[unit]
+        pf.units[unit+'hcm'] = pf.units[unit]
+    return pf
+
+class TestFieldAccess(object):
+    description = None
+
+    def __init__(self, field_name, nproc):
+        # Note this should be a field name
+        self.field_name = field_name
+        self.description = "Accessing_%s_%s" % (field_name, nproc)
+        self.nproc = nproc
+
+    def __call__(self):
+        field = FieldInfo[self.field_name]
+        deps = field.get_dependencies()
+        fields = deps.requested + _base_fields
+        skip_grids = False
+        needs_spatial = False
+        for v in field.validators:
+            f = getattr(v, "fields", None)
+            if f: fields += f
+            if getattr(v, "ghost_zones", 0) > 0:
+                skip_grids = True
+            if hasattr(v, "ghost_zones"):
+                needs_spatial = True
+        pf = realistic_pf(fields, self.nproc)
+        # This gives unequal sized grids as well as subgrids
+        dd1 = pf.h.all_data()
+        dd2 = pf.h.all_data()
+        dd1.field_parameters.update(_sample_parameters)
+        dd2.field_parameters.update(_sample_parameters)
+        v1 = dd1[self.field_name]
+        conv = field._convert_function(dd1) or 1.0
+        if not needs_spatial:
+            assert_equal(v1, conv*field._function(field, dd2))
+        if not skip_grids:
+            for g in pf.h.grids:
+                g.field_parameters.update(_sample_parameters)
+                conv = field._convert_function(g) or 1.0
+                v1 = g[self.field_name]
+                g.clear_data()
+                g.field_parameters.update(_sample_parameters)
+                assert_equal(v1, conv*field._function(field, g))
+
+def test_all_fields():
+    for field in FieldInfo:
+        if field.startswith("CuttingPlane"): continue
+        if field.startswith("particle"): continue
+        if field.startswith("CIC"): continue
+        if FieldInfo[field].particle_type: continue
+        for nproc in [1, 4, 8]:
+            yield TestFieldAccess(field, nproc)


diff -r 06462f53a16bbbb175a88920ed0c219aacbdc4d9 -r ae66175414388a369d488d58fab308f84e80203b yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -448,7 +448,7 @@
 
 # This is rho_total / rho_cr(z).
 def _Convert_Overdensity(data):
-    return 1 / (rho_crit_now * data.pf.hubble_constant**2 * 
+    return 1.0 / (rho_crit_now * data.pf.hubble_constant**2 * 
                 (1+data.pf.current_redshift)**3)
 add_field("Overdensity",function=_Matter_Density,
           convert_function=_Convert_Overdensity, units=r"")
@@ -468,8 +468,8 @@
     else:
         omega_baryon_now = 0.0441
     return data['Density'] / (omega_baryon_now * rho_crit_now * 
-                              (data.pf['CosmologyHubbleConstantNow']**2) * 
-                              ((1+data.pf['CosmologyCurrentRedshift'])**3))
+                              (data.pf.hubble_constant**2) * 
+                              ((1+data.pf.current_redshift)**3))
 add_field("Baryon_Overdensity", function=_Baryon_Overdensity, 
           units=r"")
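
test_all_fields() above uses the nose test-generator pattern: yielding
callables with a description attribute makes every (field, nproc) pair
report as its own test case.  A toy sketch of the same pattern (CheckEven
is illustrative, not part of yt):

    class CheckEven(object):
        def __init__(self, n):
            self.n = n
            # nose uses this attribute as the reported test name.
            self.description = "Checking_%d_is_even" % n
        def __call__(self):
            assert self.n % 2 == 0

    def test_evens():
        # Each yielded callable runs and reports as a separate test.
        for n in [0, 2, 4, 8]:
            yield CheckEven(n)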
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/21333cf31d21/
changeset:   21333cf31d21
branch:      yt
user:        MatthewTurk
date:        2012-10-18 19:55:11
summary:     In ray tests: changing a couple of assert_equal calls to
assert_rel_equal, with a tolerance of 1e-14.
affected #:  2 files

diff -r 06462f53a16bbbb175a88920ed0c219aacbdc4d9 -r 21333cf31d213c5bac726e8b8f71f6c2f7520afa yt/data_objects/tests/test_rays.py
--- a/yt/data_objects/tests/test_rays.py
+++ b/yt/data_objects/tests/test_rays.py
@@ -9,7 +9,7 @@
     p2 = np.random.random(3)
 
     my_ray = pf.h.ray(p1, p2)
-    assert_equal(my_ray['dts'].sum(), 1.0)
+    assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
     ray_cells = my_ray['dts'] > 0
 
     # find cells intersected by the ray
@@ -26,6 +26,6 @@
     tout = tout.min(axis=0)
     my_cells = (tin < tout) & (tin < 1) & (tout > 0)
 
-    assert_equal(ray_cells.sum(), my_cells.sum())
-    assert_equal(my_ray['Density'][ray_cells].sum(),
-                 my_all['Density'][my_cells].sum())           
+    assert_rel_equal(ray_cells.sum(), my_cells.sum(), 14)
+    assert_rel_equal(my_ray['Density'][ray_cells].sum(),
+                     my_all['Density'][my_cells].sum(), 14)


diff -r 06462f53a16bbbb175a88920ed0c219aacbdc4d9 -r 21333cf31d213c5bac726e8b8f71f6c2f7520afa yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -26,7 +26,7 @@
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
-    assert_array_less, assert_string_equal
+    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
 
 def assert_rel_equal(a1, a2, decimels):
     return assert_almost_equal(a1/a2, 1.0, decimels)
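
Why the switch matters: assert_rel_equal divides the two values and
compares the ratio to 1.0, so the check is on relative error and is
insensitive to magnitude.  A self-contained restatement (rel_equal is an
illustrative name, spelled with "decimals"):

    from numpy.testing import assert_almost_equal

    def rel_equal(a1, a2, decimals):
        # The ratio must match 1.0 to `decimals` places, regardless of
        # the operands' absolute scale.
        return assert_almost_equal(a1 / a2, 1.0, decimals)

    # Passes at 14 decimals even though the absolute difference is ~1e5:
    rel_equal(1.0e20, 1.0e20 * (1.0 + 1.0e-15), 14)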



https://bitbucket.org/yt_analysis/yt-3.0/changeset/a98f8afb7386/
changeset:   a98f8afb7386
branch:      yt
user:        ngoldbaum
date:        2012-10-18 20:53:46
summary:     Fixing field access for fields that depend on coordinate transformations.
affected #:  2 files

diff -r ae66175414388a369d488d58fab308f84e80203b -r a98f8afb73867ce458753dd52a54a8881e001c49 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -224,9 +224,9 @@
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
       
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
-    return get_sph_r(vectors, center)
+    return get_sph_r(coords)
 
 def _Convert_sph_r_CGS(data):
    return data.convert("cm")
@@ -241,7 +241,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
     return get_sph_theta(coords, normal)
 
@@ -254,7 +254,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
     return get_sph_phi(coords, normal)
 
@@ -266,7 +266,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
       
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
     return get_cyl_r(coords, normal)
 
@@ -286,7 +286,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
     return get_cyl_z(coords, normal)
 
@@ -303,7 +303,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
     return get_cyl_theta(coords, normal)
 
@@ -344,9 +344,9 @@
 
 def _cyl_RadialVelocity(field, data):
     normal = data.get_field_parameter("normal")
-    velocities = obtain_rv_vec(data).transpose()
+    velocities = obtain_rv_vec(data)
 
-    theta = np.tile(data['cyl_theta'], (3, 1)).transpose()
+    theta = data['cyl_theta']
 
     return get_cyl_r_component(velocities, theta, normal)
 
@@ -369,8 +369,8 @@
 
 def _cyl_TangentialVelocity(field, data):
     normal = data.get_field_parameter("normal")
-    velocities = obtain_rv_vec(data).transpose()
-    theta = np.tile(data['cyl_theta'], (3, 1)).transpose()
+    velocities = obtain_rv_vec(data)
+    theta = data['cyl_theta']
 
     return get_cyl_theta_component(velocities, theta, normal)
 
@@ -876,9 +876,9 @@
 
 def _RadialVelocity(field, data):
     normal = data.get_field_parameter("normal")
-    velocities = obtain_rv_vec(data).transpose()    
-    theta = np.tile(data['sph_theta'], (3, 1)).transpose()
-    phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
+    velocities = obtain_rv_vec(data)    
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
 
     return get_sph_r_component(velocities, theta, phi, normal)
 
@@ -1022,8 +1022,8 @@
 
     Bfields = np.array([data['Bx'], data['By'], data['Bz']])
 
-    theta = np.tile(data['sph_theta'], (3, 1)).transpose()
-    phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
 
     return get_sph_theta_component(Bfields, theta, phi, normal)
 
@@ -1036,7 +1036,7 @@
 
     Bfields = np.array([data['Bx'], data['By'], data['Bz']])
 
-    phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
+    phi   = data['sph_phi']
 
     return get_sph_phi_component(Bfields, phi, normal)
 
@@ -1049,8 +1049,8 @@
 
     Bfields = np.array([data['Bx'], data['By'], data['Bz']])
 
-    theta = np.tile(data['sph_theta'], (3, 1)).transpose()
-    phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
 
     return get_sph_r_component(Bfields, theta, phi, normal)
 


diff -r ae66175414388a369d488d58fab308f84e80203b -r a98f8afb73867ce458753dd52a54a8881e001c49 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -686,20 +686,29 @@
     # The spherical coordinates radius is simply the magnitude of the
     # coordinate vector.
 
-    return np.sqrt(np.sum(coords**2, axis=-1))
+    return np.sqrt(np.sum(coords**2, axis=0))
 
+def resize_vector(vector,vector_array):
+    if len(vector_array.shape) == 4:
+        res_vector = np.resize(vector,(3,1,1,1))
+    else:
+        res_vector = np.resize(vector,(3,1))
+    return res_vector
 
 def get_sph_theta(coords, normal):
     # The angle (theta) with respect to the normal (J), is the arccos
     # of the dot product of the normal with the normalized coordinate
     # vector.
     
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
+    res_normal = resize_vector(normal, coords)
 
-    JdotCoords = np.sum(J*coords,axis=-1)
+    tile_shape = [1] + list(coords.shape)[1:]
     
-    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
+    J = np.tile(res_normal,tile_shape)
+
+    JdotCoords = np.sum(J*coords,axis=0)
+    
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=0)) )
 
 def get_sph_phi(coords, normal):
     # We have freedom with respect to what axis (xprime) to define
@@ -713,13 +722,16 @@
     # vector.
 
     (xprime, yprime, zprime) = get_ortho_basis(normal)
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
 
-    Px = np.sum(Jx*coords,axis=-1)
-    Py = np.sum(Jy*coords,axis=-1)
+    res_xprime = resize_vector(xprime, coords)
+    res_yprime = resize_vector(yprime, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    Px = np.sum(Jx*coords,axis=0)
+    Py = np.sum(Jy*coords,axis=0)
     
     return np.arctan2(Py,Px)
 
@@ -727,20 +739,24 @@
     # The cross product of the normal (J) with a coordinate vector
     # gives a vector of magnitude equal to the cylindrical radius.
 
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal, tile_shape)
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
     
-    JcrossCoords = np.cross(J, coords)
-    return np.sqrt(np.sum(JcrossCoords**2, axis=-1))
+    JcrossCoords = np.cross(J, coords, axisa=0, axisb=0, axisc=0)
+    return np.sqrt(np.sum(JcrossCoords**2, axis=0))
 
 def get_cyl_z(coords, normal):
     # The dot product of the normal (J) with the coordinate vector 
     # gives the cylindrical height.
+
+    res_normal = resize_vector(normal, coords)
     
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal, tile_shape)
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
 
-    return np.sum(J*coords, axis=-1)  
+    return np.sum(J*coords, axis=0)  
 
 def get_cyl_theta(coords, normal):
     # This is identical to the spherical phi component
@@ -753,77 +769,96 @@
 
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
 
     rhat = Jx*np.cos(theta) + Jy*np.sin(theta)
 
-    return np.sum(vectors*rhat,axis=-1)
+    return np.sum(vectors*rhat,axis=0)
 
 def get_cyl_theta_component(vectors, theta, normal):
     # The theta component of a vector is the vector dotted with thetahat
     
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
 
     thetahat = -Jx*np.sin(theta) + Jy*np.cos(theta)
 
-    return np.sum(vectors*thetahat, axis=-1)
+    return np.sum(vectors*thetahat, axis=0)
 
 def get_cyl_z_component(vectors, normal):
     # The z component of a vector is the vector dotted with zhat
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    zhat = np.tile(zprime, tile_shape)
+    res_zprime = resize_vector(zprime, vectors)
 
-    return np.sum(vectors*zhat, axis=-1)
+    tile_shape = [1] + list(vectors.shape)[1:]
+    zhat = np.tile(res_zprime, tile_shape)
+
+    return np.sum(vectors*zhat, axis=0)
 
 def get_sph_r_component(vectors, theta, phi, normal):
     # The r component of a vector is the vector dotted with rhat
     
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
-    Jz = np.tile(zprime,tile_shape)
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
 
     rhat = Jx*np.sin(theta)*np.cos(phi) + \
            Jy*np.sin(theta)*np.sin(phi) + \
            Jz*np.cos(theta)
 
-    return np.sum(vectors*rhat, axis=-1)
+    return np.sum(vectors*rhat, axis=0)
 
 def get_sph_phi_component(vectors, phi, normal):
     # The phi component of a vector is the vector dotted with phihat
 
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
 
     phihat = -Jx*np.sin(phi) + Jy*np.cos(phi)
 
-    return np.sum(vectors*phihat, axis=-1)
+    return np.sum(vectors*phihat, axis=0)
 
 def get_sph_theta_component(vectors, theta, phi, normal):
     # The theta component of a vector is the vector dotted with thetahat
     
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
-    Jz = np.tile(zprime,tile_shape)
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
     
     thetahat = Jx*np.cos(theta)*np.cos(phi) + \
                Jy*np.cos(theta)*np.sin(phi) - \
                Jz*np.sin(theta)
 
-    return np.sum(vectors*thetahat, axis=-1)
+    return np.sum(vectors*thetahat, axis=0)
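
Taken together, these hunks switch the coordinate machinery to a component-first layout: obtain_rvec and obtain_rv_vec now hand back arrays of shape (3, N) (or (3, nx, ny, nz) for spatial data), the field functions drop their transpose/tile boilerplate, and math_utils reduces over axis=0, with resize_vector reshaping a plain 3-vector so np.tile can expand it along the trailing axes. A minimal runnable sketch of the new convention, with illustrative values:

    import numpy as np

    coords = np.random.random((3, 5))        # five 3-vectors, components first
    normal = np.array([0.0, 0.0, 1.0])

    # resize_vector's reshape for flat data: (3,) -> (3, 1), so np.tile can
    # expand the normal to match coords along the trailing axis.
    res_normal = np.resize(normal, (3, 1))
    J = np.tile(res_normal, [1] + list(coords.shape)[1:])

    # Dot products and norms now reduce over the leading component axis.
    JdotCoords = np.sum(J * coords, axis=0)
    theta = np.arccos(JdotCoords / np.sqrt(np.sum(coords**2, axis=0)))
    assert theta.shape == (5,)               # one angle per input vector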



https://bitbucket.org/yt_analysis/yt-3.0/changeset/ac14b13905d0/
changeset:   ac14b13905d0
branch:      yt
user:        MatthewTurk
date:        2012-10-18 21:04:12
summary:     Skipping WeakLensingConvergence.
affected #:  1 file

diff -r a98f8afb73867ce458753dd52a54a8881e001c49 -r ac14b13905d0563fd0c03d1ad2b92ab107e605d7 yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -85,6 +85,7 @@
         if field.startswith("CuttingPlane"): continue
         if field.startswith("particle"): continue
         if field.startswith("CIC"): continue
+        if field.startswith("WeakLensingConvergence"): continue
         if FieldInfo[field].particle_type: continue
         for nproc in [1, 4, 8]:
             yield TestFieldAccess(field, nproc)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/4f48186e3644/
changeset:   4f48186e3644
branch:      yt
user:        ngoldbaum
date:        2012-10-18 21:18:49
summary:     Updating the coordinate conversion tests.
affected #:  1 file

diff -r ac14b13905d0563fd0c03d1ad2b92ab107e605d7 -r 4f48186e3644fc5a4a4ce418272b210a277299a8 yt/utilities/tests/test_coordinate_conversions.py
--- a/yt/utilities/tests/test_coordinate_conversions.py
+++ b/yt/utilities/tests/test_coordinate_conversions.py
@@ -18,7 +18,7 @@
                    [ 0.73186508, -0.3109153 ,  0.75728738],
                    [ 0.8757989 , -0.41475119, -0.57039201],
                    [ 0.58040762,  0.81969082,  0.46759728],
-                   [-0.89983356, -0.9853683 , -0.38355343]])
+                   [-0.89983356, -0.9853683 , -0.38355343]]).T
 
 def test_spherical_coordinate_conversion():
     normal = [0, 0, 1]
@@ -85,44 +85,41 @@
 def test_spherical_coordinate_projections():
     normal = [0, 0, 1]
     theta = get_sph_theta(coords, normal)
-    theta_pass = np.tile(theta, (3, 1)).T
     phi = get_sph_phi(coords, normal)
-    phi_pass = np.tile(phi, (3, 1)).T
-    zero = np.tile(0,coords.shape[0])
+    zero = np.tile(0,coords.shape[1])
 
     # Purely radial field
-    vecs = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)]).T
-    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta_pass, phi_pass, normal))
-    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi_pass, normal))
+    vecs = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
 
     # Purely toroidal field
-    vecs = np.array([-np.sin(phi), np.cos(phi), zero]).T
-    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta_pass, phi_pass, normal))
-    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta_pass, phi_pass, normal))
+    vecs = np.array([-np.sin(phi), np.cos(phi), zero])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
 
     # Purely poloidal field
-    vecs = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -np.sin(theta)]).T
-    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi_pass, normal))
-    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta_pass, phi_pass, normal))
+    vecs = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -np.sin(theta)])
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
 
 def test_cylindrical_coordinate_projections():
     normal = [0, 0, 1]
     theta = get_cyl_theta(coords, normal)
-    theta_pass = np.tile(theta, (3, 1)).T
     z = get_cyl_z(coords, normal)
-    zero = np.tile(0, coords.shape[0])
+    zero = np.tile(0, coords.shape[1])
 
     # Purely radial field
-    vecs = np.array([np.cos(theta), np.sin(theta), zero]).T
-    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta_pass, normal))
+    vecs = np.array([np.cos(theta), np.sin(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
     assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
 
     # Purely toroidal field
-    vecs = np.array([-np.sin(theta), np.cos(theta), zero]).T
+    vecs = np.array([-np.sin(theta), np.cos(theta), zero])
     assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
-    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta_pass, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))
 
     # Purely z field
-    vecs = np.array([zero, zero, z]).T
-    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta_pass, normal))
-    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta_pass, normal))
+    vecs = np.array([zero, zero, z])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))
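
With the component-first layout the tests can also drop the theta_pass/phi_pass tiling: a 1-D angle array broadcasts directly against a (3, N) vector array. A small sketch with illustrative angles:

    import numpy as np

    theta = np.array([0.1, 0.2, 0.3])
    # Purely radial in-plane unit vectors, shape (3, 3).
    vecs = np.array([np.cos(theta), np.sin(theta), np.zeros_like(theta)])
    # thetahat broadcasts against vecs; no np.tile(theta, (3, 1)).T needed.
    thetahat = np.array([-np.sin(theta), np.cos(theta), np.zeros_like(theta)])
    print(np.allclose(np.sum(vecs * thetahat, axis=0), 0.0))   # True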



https://bitbucket.org/yt_analysis/yt-3.0/changeset/0e2a183fc784/
changeset:   0e2a183fc784
branch:      yt
user:        ngoldbaum
date:        2012-10-18 21:21:06
summary:     Merged in MatthewTurk/yt (pull request #306)
affected #:  5 files

diff -r 21333cf31d213c5bac726e8b8f71f6c2f7520afa -r 0e2a183fc7848ef746d5608cfb9cc837fda95ea5 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -160,7 +160,8 @@
             # required attrs
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
-                pf.hubble_constant = pf.cosmological_simulation = 0.0
+                pf.cosmological_simulation = 0.0
+            pf.hubble_constant = 0.7
             pf.domain_left_edge = np.zeros(3, 'float64')
             pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3
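
Presumably the reason hubble_constant is split out of the zeroed attributes here: conversion functions such as _Convert_Overdensity (see the universal_fields.py hunk below) divide by hubble_constant**2, so leaving it at 0.0 makes the fake parameter file blow up with a divide-by-zero. Sketch of the arithmetic, with an illustrative rho_crit_now:

    # With hubble_constant = 0.0 this conversion is 1/0; 0.7 keeps it finite.
    rho_crit_now = 1.88e-29                  # g/cm^3, illustrative value
    hubble_constant, current_redshift = 0.7, 0.0
    conv = 1.0 / (rho_crit_now * hubble_constant**2 *
                  (1 + current_redshift)**3)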


diff -r 21333cf31d213c5bac726e8b8f71f6c2f7520afa -r 0e2a183fc7848ef746d5608cfb9cc837fda95ea5 yt/data_objects/tests/test_fields.py
--- /dev/null
+++ b/yt/data_objects/tests/test_fields.py
@@ -0,0 +1,91 @@
+from yt.testing import *
+import numpy as np
+from yt.data_objects.field_info_container import \
+    FieldInfo
+import yt.data_objects.universal_fields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    np.seterr(all = 'ignore')
+
+_sample_parameters = dict(
+    axis = 0,
+    center = np.array((0.0, 0.0, 0.0)),
+    bulk_velocity = np.array((0.0, 0.0, 0.0)),
+    normal = np.array((0.0, 0.0, 1.0)),
+    cp_x_vec = np.array((1.0, 0.0, 0.0)),
+    cp_y_vec = np.array((0.0, 1.0, 0.0)),
+    cp_z_vec = np.array((0.0, 0.0, 1.0)),
+)
+
+_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
+
+def realistic_pf(fields, nprocs):
+    pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
+    pf.parameters["HydroMethod"] = "streaming"
+    pf.parameters["Gamma"] = 5.0/3.0
+    pf.parameters["EOSType"] = 1.0
+    pf.parameters["EOSSoundSpeed"] = 1.0
+    pf.conversion_factors["Time"] = 1.0
+    pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
+    pf.current_redshift = 0.0001
+    pf.hubble_constant = 0.7
+    for unit in mpc_conversion:
+        pf.units[unit+'h'] = pf.units[unit]
+        pf.units[unit+'cm'] = pf.units[unit]
+        pf.units[unit+'hcm'] = pf.units[unit]
+    return pf
+
+class TestFieldAccess(object):
+    description = None
+
+    def __init__(self, field_name, nproc):
+        # Note this should be a field name
+        self.field_name = field_name
+        self.description = "Accessing_%s_%s" % (field_name, nproc)
+        self.nproc = nproc
+
+    def __call__(self):
+        field = FieldInfo[self.field_name]
+        deps = field.get_dependencies()
+        fields = deps.requested + _base_fields
+        skip_grids = False
+        needs_spatial = False
+        for v in field.validators:
+            f = getattr(v, "fields", None)
+            if f: fields += f
+            if getattr(v, "ghost_zones", 0) > 0:
+                skip_grids = True
+            if hasattr(v, "ghost_zones"):
+                needs_spatial = True
+        pf = realistic_pf(fields, self.nproc)
+        # This gives unequal sized grids as well as subgrids
+        dd1 = pf.h.all_data()
+        dd2 = pf.h.all_data()
+        dd1.field_parameters.update(_sample_parameters)
+        dd2.field_parameters.update(_sample_parameters)
+        v1 = dd1[self.field_name]
+        conv = field._convert_function(dd1) or 1.0
+        if not needs_spatial:
+            assert_equal(v1, conv*field._function(field, dd2))
+        if not skip_grids:
+            for g in pf.h.grids:
+                g.field_parameters.update(_sample_parameters)
+                conv = field._convert_function(g) or 1.0
+                v1 = g[self.field_name]
+                g.clear_data()
+                g.field_parameters.update(_sample_parameters)
+                assert_equal(v1, conv*field._function(field, g))
+
+def test_all_fields():
+    for field in FieldInfo:
+        if field.startswith("CuttingPlane"): continue
+        if field.startswith("particle"): continue
+        if field.startswith("CIC"): continue
+        if field.startswith("WeakLensingConvergence"): continue
+        if FieldInfo[field].particle_type: continue
+        for nproc in [1, 4, 8]:
+            yield TestFieldAccess(field, nproc)
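
test_fields.py relies on nose's test-generator protocol: a test_* generator that yields callables registers one test per yielded object, and nose uses the object's description attribute to name it in the output. A stripped-down sketch of the pattern, with an illustrative field list:

    class TestFieldAccess(object):
        def __init__(self, field_name, nproc):
            # nose prints description for each generated test
            self.description = "Accessing_%s_%s" % (field_name, nproc)
            self.field_name, self.nproc = field_name, nproc

        def __call__(self):
            assert self.field_name   # the real checks live in the diff above

    def test_all_fields():
        for field in ["Density", "Pressure"]:        # illustrative
            for nproc in [1, 4, 8]:
                yield TestFieldAccess(field, nproc)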


diff -r 21333cf31d213c5bac726e8b8f71f6c2f7520afa -r 0e2a183fc7848ef746d5608cfb9cc837fda95ea5 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -224,9 +224,9 @@
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
       
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
-    return get_sph_r(vectors, center)
+    return get_sph_r(coords)
 
 def _Convert_sph_r_CGS(data):
    return data.convert("cm")
@@ -241,7 +241,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
     return get_sph_theta(coords, normal)
 
@@ -254,7 +254,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
     return get_sph_phi(coords, normal)
 
@@ -266,7 +266,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
       
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
     return get_cyl_r(coords, normal)
 
@@ -286,7 +286,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
     return get_cyl_z(coords, normal)
 
@@ -303,7 +303,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
     return get_cyl_theta(coords, normal)
 
@@ -344,9 +344,9 @@
 
 def _cyl_RadialVelocity(field, data):
     normal = data.get_field_parameter("normal")
-    velocities = obtain_rv_vec(data).transpose()
+    velocities = obtain_rv_vec(data)
 
-    theta = np.tile(data['cyl_theta'], (3, 1)).transpose()
+    theta = data['cyl_theta']
 
     return get_cyl_r_component(velocities, theta, normal)
 
@@ -369,8 +369,8 @@
 
 def _cyl_TangentialVelocity(field, data):
     normal = data.get_field_parameter("normal")
-    velocities = obtain_rv_vec(data).transpose()
-    theta = np.tile(data['cyl_theta'], (3, 1)).transpose()
+    velocities = obtain_rv_vec(data)
+    theta = data['cyl_theta']
 
     return get_cyl_theta_component(velocities, theta, normal)
 
@@ -448,7 +448,7 @@
 
 # This is rho_total / rho_cr(z).
 def _Convert_Overdensity(data):
-    return 1 / (rho_crit_now * data.pf.hubble_constant**2 * 
+    return 1.0 / (rho_crit_now * data.pf.hubble_constant**2 * 
                 (1+data.pf.current_redshift)**3)
 add_field("Overdensity",function=_Matter_Density,
           convert_function=_Convert_Overdensity, units=r"")
@@ -468,8 +468,8 @@
     else:
         omega_baryon_now = 0.0441
     return data['Density'] / (omega_baryon_now * rho_crit_now * 
-                              (data.pf['CosmologyHubbleConstantNow']**2) * 
-                              ((1+data.pf['CosmologyCurrentRedshift'])**3))
+                              (data.pf.hubble_constant**2) * 
+                              ((1+data.pf.current_redshift)**3))
 add_field("Baryon_Overdensity", function=_Baryon_Overdensity, 
           units=r"")
 
@@ -876,9 +876,9 @@
 
 def _RadialVelocity(field, data):
     normal = data.get_field_parameter("normal")
-    velocities = obtain_rv_vec(data).transpose()    
-    theta = np.tile(data['sph_theta'], (3, 1)).transpose()
-    phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
+    velocities = obtain_rv_vec(data)    
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
 
     return get_sph_r_component(velocities, theta, phi, normal)
 
@@ -1022,8 +1022,8 @@
 
     Bfields = np.array([data['Bx'], data['By'], data['Bz']])
 
-    theta = np.tile(data['sph_theta'], (3, 1)).transpose()
-    phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
 
     return get_sph_theta_component(Bfields, theta, phi, normal)
 
@@ -1036,7 +1036,7 @@
 
     Bfields = np.array([data['Bx'], data['By'], data['Bz']])
 
-    phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
+    phi   = data['sph_phi']
 
     return get_sph_phi_component(Bfields, phi, normal)
 
@@ -1049,8 +1049,8 @@
 
     Bfields = np.array([data['Bx'], data['By'], data['Bz']])
 
-    theta = np.tile(data['sph_theta'], (3, 1)).transpose()
-    phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
 
     return get_sph_r_component(Bfields, theta, phi, normal)
 


diff -r 21333cf31d213c5bac726e8b8f71f6c2f7520afa -r 0e2a183fc7848ef746d5608cfb9cc837fda95ea5 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -686,20 +686,29 @@
     # The spherical coordinates radius is simply the magnitude of the
     # coordinate vector.
 
-    return np.sqrt(np.sum(coords**2, axis=-1))
+    return np.sqrt(np.sum(coords**2, axis=0))
 
+def resize_vector(vector,vector_array):
+    if len(vector_array.shape) == 4:
+        res_vector = np.resize(vector,(3,1,1,1))
+    else:
+        res_vector = np.resize(vector,(3,1))
+    return res_vector
 
 def get_sph_theta(coords, normal):
     # The angle (theta) with respect to the normal (J), is the arccos
     # of the dot product of the normal with the normalized coordinate
     # vector.
     
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
+    res_normal = resize_vector(normal, coords)
 
-    JdotCoords = np.sum(J*coords,axis=-1)
+    tile_shape = [1] + list(coords.shape)[1:]
     
-    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
+    J = np.tile(res_normal,tile_shape)
+
+    JdotCoords = np.sum(J*coords,axis=0)
+    
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=0)) )
 
 def get_sph_phi(coords, normal):
     # We have freedom with respect to what axis (xprime) to define
@@ -713,13 +722,16 @@
     # vector.
 
     (xprime, yprime, zprime) = get_ortho_basis(normal)
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
 
-    Px = np.sum(Jx*coords,axis=-1)
-    Py = np.sum(Jy*coords,axis=-1)
+    res_xprime = resize_vector(xprime, coords)
+    res_yprime = resize_vector(yprime, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    Px = np.sum(Jx*coords,axis=0)
+    Py = np.sum(Jy*coords,axis=0)
     
     return np.arctan2(Py,Px)
 
@@ -727,20 +739,24 @@
     # The cross product of the normal (J) with a coordinate vector
     # gives a vector of magnitude equal to the cylindrical radius.
 
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal, tile_shape)
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
     
-    JcrossCoords = np.cross(J, coords)
-    return np.sqrt(np.sum(JcrossCoords**2, axis=-1))
+    JcrossCoords = np.cross(J, coords, axisa=0, axisb=0, axisc=0)
+    return np.sqrt(np.sum(JcrossCoords**2, axis=0))
 
 def get_cyl_z(coords, normal):
     # The dot product of the normal (J) with the coordinate vector 
     # gives the cylindrical height.
+
+    res_normal = resize_vector(normal, coords)
     
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal, tile_shape)
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
 
-    return np.sum(J*coords, axis=-1)  
+    return np.sum(J*coords, axis=0)  
 
 def get_cyl_theta(coords, normal):
     # This is identical to the spherical phi component
@@ -753,77 +769,96 @@
 
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
 
     rhat = Jx*np.cos(theta) + Jy*np.sin(theta)
 
-    return np.sum(vectors*rhat,axis=-1)
+    return np.sum(vectors*rhat,axis=0)
 
 def get_cyl_theta_component(vectors, theta, normal):
     # The theta component of a vector is the vector dotted with thetahat
     
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
 
     thetahat = -Jx*np.sin(theta) + Jy*np.cos(theta)
 
-    return np.sum(vectors*thetahat, axis=-1)
+    return np.sum(vectors*thetahat, axis=0)
 
 def get_cyl_z_component(vectors, normal):
     # The z component of a vector is the vector dotted with zhat
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    zhat = np.tile(zprime, tile_shape)
+    res_zprime = resize_vector(zprime, vectors)
 
-    return np.sum(vectors*zhat, axis=-1)
+    tile_shape = [1] + list(vectors.shape)[1:]
+    zhat = np.tile(res_zprime, tile_shape)
+
+    return np.sum(vectors*zhat, axis=0)
 
 def get_sph_r_component(vectors, theta, phi, normal):
     # The r component of a vector is the vector dotted with rhat
     
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
-    Jz = np.tile(zprime,tile_shape)
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
 
     rhat = Jx*np.sin(theta)*np.cos(phi) + \
            Jy*np.sin(theta)*np.sin(phi) + \
            Jz*np.cos(theta)
 
-    return np.sum(vectors*rhat, axis=-1)
+    return np.sum(vectors*rhat, axis=0)
 
 def get_sph_phi_component(vectors, phi, normal):
     # The phi component of a vector is the vector dotted with phihat
 
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
 
     phihat = -Jx*np.sin(phi) + Jy*np.cos(phi)
 
-    return np.sum(vectors*phihat, axis=-1)
+    return np.sum(vectors*phihat, axis=0)
 
 def get_sph_theta_component(vectors, theta, phi, normal):
     # The theta component of a vector is the vector dotted with thetahat
     
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
-    Jz = np.tile(zprime,tile_shape)
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
     
     thetahat = Jx*np.cos(theta)*np.cos(phi) + \
                Jy*np.cos(theta)*np.sin(phi) - \
                Jz*np.sin(theta)
 
-    return np.sum(vectors*thetahat, axis=-1)
+    return np.sum(vectors*thetahat, axis=0)


diff -r 21333cf31d213c5bac726e8b8f71f6c2f7520afa -r 0e2a183fc7848ef746d5608cfb9cc837fda95ea5 yt/utilities/tests/test_coordinate_conversions.py
--- a/yt/utilities/tests/test_coordinate_conversions.py
+++ b/yt/utilities/tests/test_coordinate_conversions.py
@@ -18,7 +18,7 @@
                    [ 0.73186508, -0.3109153 ,  0.75728738],
                    [ 0.8757989 , -0.41475119, -0.57039201],
                    [ 0.58040762,  0.81969082,  0.46759728],
-                   [-0.89983356, -0.9853683 , -0.38355343]])
+                   [-0.89983356, -0.9853683 , -0.38355343]]).T
 
 def test_spherical_coordinate_conversion():
     normal = [0, 0, 1]
@@ -85,44 +85,41 @@
 def test_spherical_coordinate_projections():
     normal = [0, 0, 1]
     theta = get_sph_theta(coords, normal)
-    theta_pass = np.tile(theta, (3, 1)).T
     phi = get_sph_phi(coords, normal)
-    phi_pass = np.tile(phi, (3, 1)).T
-    zero = np.tile(0,coords.shape[0])
+    zero = np.tile(0,coords.shape[1])
 
     # Purely radial field
-    vecs = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)]).T
-    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta_pass, phi_pass, normal))
-    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi_pass, normal))
+    vecs = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
 
     # Purely toroidal field
-    vecs = np.array([-np.sin(phi), np.cos(phi), zero]).T
-    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta_pass, phi_pass, normal))
-    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta_pass, phi_pass, normal))
+    vecs = np.array([-np.sin(phi), np.cos(phi), zero])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
 
     # Purely poloidal field
-    vecs = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -np.sin(theta)]).T
-    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi_pass, normal))
-    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta_pass, phi_pass, normal))
+    vecs = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -np.sin(theta)])
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
 
 def test_cylindrical_coordinate_projections():
     normal = [0, 0, 1]
     theta = get_cyl_theta(coords, normal)
-    theta_pass = np.tile(theta, (3, 1)).T
     z = get_cyl_z(coords, normal)
-    zero = np.tile(0, coords.shape[0])
+    zero = np.tile(0, coords.shape[1])
 
     # Purely radial field
-    vecs = np.array([np.cos(theta), np.sin(theta), zero]).T
-    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta_pass, normal))
+    vecs = np.array([np.cos(theta), np.sin(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
     assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
 
     # Purely toroidal field
-    vecs = np.array([-np.sin(theta), np.cos(theta), zero]).T
+    vecs = np.array([-np.sin(theta), np.cos(theta), zero])
     assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
-    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta_pass, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))
 
     # Purely z field
-    vecs = np.array([zero, zero, z]).T
-    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta_pass, normal))
-    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta_pass, normal))
+    vecs = np.array([zero, zero, z])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))



https://bitbucket.org/yt_analysis/yt-3.0/changeset/b33c3a4a1cde/
changeset:   b33c3a4a1cde
branch:      yt
user:        jsoishi
date:        2012-10-18 21:49:02
summary:     fixed typo
affected #:  1 file

diff -r 0e2a183fc7848ef746d5608cfb9cc837fda95ea5 -r b33c3a4a1cde5b796cc09f99b78c683518b4cd18 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -28,8 +28,8 @@
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
     assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
 
-def assert_rel_equal(a1, a2, decimels):
-    return assert_almost_equal(a1/a2, 1.0, decimels)
+def assert_rel_equal(a1, a2, decimals):
+    return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/46dc87531a8e/
changeset:   46dc87531a8e
branch:      yt
user:        MatthewTurk
date:        2012-10-19 13:39:14
summary:     Changing Extrema derived quantity to use nanmin and nanmax
affected #:  2 files

diff -r b33c3a4a1cde5b796cc09f99b78c683518b4cd18 -r 46dc87531a8e2e450f2c0be2b3eac933c9c36a85 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -598,16 +598,16 @@
                     continue
             else:
                 nz_filter = None
-            mins.append(data[field][nz_filter].min())
-            maxs.append(data[field][nz_filter].max())
+            mins.append(np.nanmin(data[field][nz_filter]))
+            maxs.append(np.nanmax(data[field][nz_filter]))
         else:
             if this_filter.any():
                 if non_zero:
                     nz_filter = ((this_filter) &
                                  (data[field][this_filter] > 0.0))
                 else: nz_filter = this_filter
-                mins.append(data[field][nz_filter].min())
-                maxs.append(data[field][nz_filter].max())
+                mins.append(np.nanmin(data[field][nz_filter]))
+                maxs.append(np.nanmax(data[field][nz_filter]))
             else:
                 mins.append(1e90)
                 maxs.append(-1e90)


diff -r b33c3a4a1cde5b796cc09f99b78c683518b4cd18 -r 46dc87531a8e2e450f2c0be2b3eac933c9c36a85 yt/data_objects/test_derived_quantities.py
--- /dev/null
+++ b/yt/data_objects/test_derived_quantities.py
@@ -0,0 +1,20 @@
+from yt.testing import *
+from yt.data_objects.profiles import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+import numpy as np
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_extrema():
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(16, nprocs = nprocs)
+        sp = pf.h.sphere("c", (0.25, '1'))
+        (mi, ma), = sp.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(sp["Density"])
+        yield assert_equal, ma, np.nanmax(sp["Density"])
+        dd = pf.h.all_data()
+        (mi, ma), = dd.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(dd["Density"])
+        yield assert_equal, ma, np.nanmax(dd["Density"])



https://bitbucket.org/yt_analysis/yt-3.0/changeset/8194d4172fc3/
changeset:   8194d4172fc3
branch:      yt
user:        MatthewTurk
date:        2012-10-19 14:01:53
summary:     Actually testing for NaNs -- and fixing some imports, moving some files.
affected #:  2 files

diff -r 46dc87531a8e2e450f2c0be2b3eac933c9c36a85 -r 8194d4172fc3edaea21bfd7a775c3573f0706d2d yt/data_objects/test_derived_quantities.py
--- a/yt/data_objects/test_derived_quantities.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from yt.testing import *
-from yt.data_objects.profiles import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
-import numpy as np
-
-def setup():
-    from yt.config import ytcfg
-    ytcfg["yt","__withintesting"] = "True"
-
-def test_extrema():
-    for nprocs in [1, 2, 4, 8]:
-        pf = fake_random_pf(16, nprocs = nprocs)
-        sp = pf.h.sphere("c", (0.25, '1'))
-        (mi, ma), = sp.quantities["Extrema"]("Density")
-        yield assert_equal, mi, np.nanmin(sp["Density"])
-        yield assert_equal, ma, np.nanmax(sp["Density"])
-        dd = pf.h.all_data()
-        (mi, ma), = dd.quantities["Extrema"]("Density")
-        yield assert_equal, mi, np.nanmin(dd["Density"])
-        yield assert_equal, ma, np.nanmax(dd["Density"])


diff -r 46dc87531a8e2e450f2c0be2b3eac933c9c36a85 -r 8194d4172fc3edaea21bfd7a775c3573f0706d2d yt/data_objects/tests/test_derived_quantities.py
--- /dev/null
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -0,0 +1,24 @@
+from yt.testing import *
+import numpy as np
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_extrema():
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(16, nprocs = nprocs, fields = ("Density",
+                "x-velocity", "y-velocity", "z-velocity"))
+        sp = pf.h.sphere("c", (0.25, '1'))
+        (mi, ma), = sp.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(sp["Density"])
+        yield assert_equal, ma, np.nanmax(sp["Density"])
+        dd = pf.h.all_data()
+        (mi, ma), = dd.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(dd["Density"])
+        yield assert_equal, ma, np.nanmax(dd["Density"])
+        sp = pf.h.sphere("max", (0.25, '1'))
+        yield assert_equal, np.any(np.isnan(sp["RadialVelocity"])), True
+        (mi, ma), = dd.quantities["Extrema"]("RadialVelocity")
+        yield assert_equal, mi, np.nanmin(dd["RadialVelocity"])
+        yield assert_equal, ma, np.nanmax(dd["RadialVelocity"])



https://bitbucket.org/yt_analysis/yt-3.0/changeset/47ed8fb04ea4/
changeset:   47ed8fb04ea4
branch:      yt
user:        samskillman
date:        2012-10-19 16:49:20
summary:     Only reduce magnitudes if they were requested. closes issue 452.
affected #:  1 file

diff -r 598c0df9255853854419600c60ea09246a404213 -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -146,7 +146,8 @@
     @parallel_passthrough
     def _finalize_parallel(self,data):
         self.streamlines = self.comm.mpi_allreduce(self.streamlines, op='sum')
-        self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
+        if self.get_magnitude:
+            self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
         
     def _integrate_through_brick(self, node, stream, step,
                                  periodic=False, mag=None):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/56c2d60a99c7/
changeset:   56c2d60a99c7
branch:      yt
user:        samskillman
date:        2012-10-19 16:50:14
summary:     Merging
affected #:  18 files

diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
-include distribute_setup.py
+include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.hh *.h README* CREDITS FUNDING LICENSE
+recursive-include yt *.pyx *.pxd *.hh *.h README*


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -606,6 +606,7 @@
 
         if newProfile:
             mylog.info("Writing halo %d" % halo['id'])
+            if os.path.exists(filename): os.remove(filename)
             if filename.endswith('.h5'):
                 profile.write_out_h5(filename)
             else:
@@ -717,7 +718,9 @@
             Default=True.
         njobs : int
             The number of jobs over which to split the projections.  Set
-            to -1 so that each halo is done by a single processor.
+            to -1 so that each halo is done by a single processor.  Halo 
+            projections do not currently work in parallel, so this must 
+            be set to -1.
             Default: -1.
         dynamic : bool
             If True, distribute halos using a task queue.  If False,
@@ -731,6 +734,12 @@
 
         """
 
+        # Halo projections cannot run in parallel because they are done by 
+        # giving a data source to the projection object.
+        if njobs > 0:
+            mylog.warn("Halo projections cannot use more than one processor per halo, setting njobs to -1.")
+            njobs = -1
+        
         # Get list of halos for projecting.
         if halo_list == 'filtered':
             halo_projection_list = self.filtered_halos


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -237,6 +237,7 @@
     def __set_default_field_parameters(self):
         self.set_field_parameter("center",np.zeros(3,dtype='float64'))
         self.set_field_parameter("bulk_velocity",np.zeros(3,dtype='float64'))
+        self.set_field_parameter("normal",np.array([0,0,1],dtype='float64'))
 
     def _set_center(self, center):
         if center is None:
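
One consequence of the new default: fields that validate a "normal" parameter (sph_theta, cyl_R, the new cyl_* velocity fields) now evaluate without the caller supplying one, defaulting to the z-axis. A sketch, assuming the fake_random_pf helper from yt.testing:

    from yt.testing import fake_random_pf

    pf = fake_random_pf(16)
    dd = pf.h.all_data()
    # No dd.set_field_parameter("normal", ...) needed; "center" likewise
    # defaults to the zero vector set alongside it.
    theta = dd["sph_theta"]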


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -598,16 +598,16 @@
                     continue
             else:
                 nz_filter = None
-            mins.append(data[field][nz_filter].min())
-            maxs.append(data[field][nz_filter].max())
+            mins.append(np.nanmin(data[field][nz_filter]))
+            maxs.append(np.nanmax(data[field][nz_filter]))
         else:
             if this_filter.any():
                 if non_zero:
                     nz_filter = ((this_filter) &
                                  (data[field][this_filter] > 0.0))
                 else: nz_filter = this_filter
-                mins.append(data[field][nz_filter].min())
-                maxs.append(data[field][nz_filter].max())
+                mins.append(np.nanmin(data[field][nz_filter]))
+                maxs.append(np.nanmax(data[field][nz_filter]))
             else:
                 mins.append(1e90)
                 maxs.append(-1e90)


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -160,7 +160,8 @@
             # required attrs
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
-                pf.hubble_constant = pf.cosmological_simulation = 0.0
+                pf.cosmological_simulation = 0.0
+            pf.hubble_constant = 0.7
             pf.domain_left_edge = np.zeros(3, 'float64')
             pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/data_objects/tests/test_derived_quantities.py
--- /dev/null
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -0,0 +1,24 @@
+from yt.testing import *
+import numpy as np
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_extrema():
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(16, nprocs = nprocs, fields = ("Density",
+                "x-velocity", "y-velocity", "z-velocity"))
+        sp = pf.h.sphere("c", (0.25, '1'))
+        (mi, ma), = sp.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(sp["Density"])
+        yield assert_equal, ma, np.nanmax(sp["Density"])
+        dd = pf.h.all_data()
+        (mi, ma), = dd.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(dd["Density"])
+        yield assert_equal, ma, np.nanmax(dd["Density"])
+        sp = pf.h.sphere("max", (0.25, '1'))
+        yield assert_equal, np.any(np.isnan(sp["RadialVelocity"])), True
+        (mi, ma), = dd.quantities["Extrema"]("RadialVelocity")
+        yield assert_equal, mi, np.nanmin(dd["RadialVelocity"])
+        yield assert_equal, ma, np.nanmax(dd["RadialVelocity"])


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/data_objects/tests/test_fields.py
--- /dev/null
+++ b/yt/data_objects/tests/test_fields.py
@@ -0,0 +1,91 @@
+from yt.testing import *
+import numpy as np
+from yt.data_objects.field_info_container import \
+    FieldInfo
+import yt.data_objects.universal_fields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    np.seterr(all = 'ignore')
+
+_sample_parameters = dict(
+    axis = 0,
+    center = np.array((0.0, 0.0, 0.0)),
+    bulk_velocity = np.array((0.0, 0.0, 0.0)),
+    normal = np.array((0.0, 0.0, 1.0)),
+    cp_x_vec = np.array((1.0, 0.0, 0.0)),
+    cp_y_vec = np.array((0.0, 1.0, 0.0)),
+    cp_z_vec = np.array((0.0, 0.0, 1.0)),
+)
+
+_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
+
+def realistic_pf(fields, nprocs):
+    pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
+    pf.parameters["HydroMethod"] = "streaming"
+    pf.parameters["Gamma"] = 5.0/3.0
+    pf.parameters["EOSType"] = 1.0
+    pf.parameters["EOSSoundSpeed"] = 1.0
+    pf.conversion_factors["Time"] = 1.0
+    pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
+    pf.current_redshift = 0.0001
+    pf.hubble_constant = 0.7
+    for unit in mpc_conversion:
+        pf.units[unit+'h'] = pf.units[unit]
+        pf.units[unit+'cm'] = pf.units[unit]
+        pf.units[unit+'hcm'] = pf.units[unit]
+    return pf
+
+class TestFieldAccess(object):
+    description = None
+
+    def __init__(self, field_name, nproc):
+        # Note this should be a field name
+        self.field_name = field_name
+        self.description = "Accessing_%s_%s" % (field_name, nproc)
+        self.nproc = nproc
+
+    def __call__(self):
+        field = FieldInfo[self.field_name]
+        deps = field.get_dependencies()
+        fields = deps.requested + _base_fields
+        skip_grids = False
+        needs_spatial = False
+        for v in field.validators:
+            f = getattr(v, "fields", None)
+            if f: fields += f
+            if getattr(v, "ghost_zones", 0) > 0:
+                skip_grids = True
+            if hasattr(v, "ghost_zones"):
+                needs_spatial = True
+        pf = realistic_pf(fields, self.nproc)
+        # This gives unequal sized grids as well as subgrids
+        dd1 = pf.h.all_data()
+        dd2 = pf.h.all_data()
+        dd1.field_parameters.update(_sample_parameters)
+        dd2.field_parameters.update(_sample_parameters)
+        v1 = dd1[self.field_name]
+        conv = field._convert_function(dd1) or 1.0
+        if not needs_spatial:
+            assert_equal(v1, conv*field._function(field, dd2))
+        if not skip_grids:
+            for g in pf.h.grids:
+                g.field_parameters.update(_sample_parameters)
+                conv = field._convert_function(g) or 1.0
+                v1 = g[self.field_name]
+                g.clear_data()
+                g.field_parameters.update(_sample_parameters)
+                assert_equal(v1, conv*field._function(field, g))
+
+def test_all_fields():
+    for field in FieldInfo:
+        if field.startswith("CuttingPlane"): continue
+        if field.startswith("particle"): continue
+        if field.startswith("CIC"): continue
+        if field.startswith("WeakLensingConvergence"): continue
+        if FieldInfo[field].particle_type: continue
+        for nproc in [1, 4, 8]:
+            yield TestFieldAccess(field, nproc)


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/data_objects/tests/test_ortho_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -0,0 +1,25 @@
+from yt.testing import *
+
+def test_ortho_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
+
+    axes = ['x', 'y', 'z']
+    for ax, an in enumerate(axes):
+        ocoord = np.random.random(2)
+
+        my_oray = pf.h.ortho_ray(ax, ocoord)
+
+        my_axes = range(3)
+        del my_axes[ax]
+
+        # find the cells intersected by the ortho ray
+        my_all = pf.h.all_data()
+        my_cells = (np.abs(my_all[axes[my_axes[0]]] - ocoord[0]) <= 
+                    0.5 * dx[my_axes[0]]) & \
+                   (np.abs(my_all[axes[my_axes[1]]] - ocoord[1]) <= 
+                    0.5 * dx[my_axes[1]])
+
+        assert_equal(my_oray['Density'].sum(),
+                     my_all['Density'][my_cells].sum())
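
The selection logic in this test: a cell lies on an axis-aligned ray when, in each of the two perpendicular axes, its center sits within half a cell width of the ray's coordinate. In one dimension, with an illustrative 64-cell grid:

    import numpy as np

    dx = 1.0 / 64
    centers = (np.arange(64) + 0.5) * dx     # cell centers along one axis
    y0 = np.random.random()
    hit = np.abs(centers - y0) <= 0.5 * dx
    print(hit.sum())   # 1 for a generic ray position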


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/data_objects/tests/test_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_rays.py
@@ -0,0 +1,31 @@
+from yt.testing import *
+
+def test_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+      pf.domain_dimensions
+
+    p1 = np.random.random(3)
+    p2 = np.random.random(3)
+
+    my_ray = pf.h.ray(p1, p2)
+    assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
+    ray_cells = my_ray['dts'] > 0
+
+    # find cells intersected by the ray
+    my_all = pf.h.all_data()
+    
+    dt = np.abs(dx / (p2 - p1))
+    tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
+                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
+                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
+    tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
+                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
+                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
+    tin = tin.max(axis=0)
+    tout = tout.min(axis=0)
+    my_cells = (tin < tout) & (tin < 1) & (tout > 0)
+
+    assert_rel_equal(ray_cells.sum(), my_cells.sum(), 14)
+    assert_rel_equal(my_ray['Density'][ray_cells].sum(),
+                     my_all['Density'][my_cells].sum(), 14)
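
The tin/tout construction above is the standard slab method for ray-box intersection: parametrize the ray as p(t) = p1 + t*(p2 - p1), compute per-axis entry and exit times, and a cell is crossed when the latest entry precedes the earliest exit inside [0, 1]. A single-cell sketch:

    import numpy as np

    def ray_hits_cell(p1, p2, lo, hi):
        d = p2 - p1                       # assumes no exactly-zero component
        t1, t2 = (lo - p1) / d, (hi - p1) / d
        tin  = np.minimum(t1, t2).max()   # latest entry across the axes
        tout = np.maximum(t1, t2).min()   # earliest exit across the axes
        return (tin < tout) and (tin < 1) and (tout > 0)

    print(ray_hits_cell(np.zeros(3), np.ones(3),
                        np.array([0.4] * 3), np.array([0.6] * 3)))   # True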


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -32,7 +32,7 @@
 
 from yt.funcs import *
 
-from yt.utilities.lib import CICDeposit_3, obtain_rvec
+from yt.utilities.lib import CICDeposit_3, obtain_rvec, obtain_rv_vec
 from yt.utilities.cosmology import Cosmology
 from field_info_container import \
     add_field, \
@@ -54,7 +54,19 @@
      kboltz, \
      G, \
      rho_crit_now, \
-     speed_of_light_cgs
+     speed_of_light_cgs, \
+     km_per_cm
+
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi
      
 # Note that, despite my newfound efforts to comply with PEP-8,
 # I violate it here in order to keep the name/func_name relationship
@@ -179,12 +191,8 @@
 
 def _VelocityMagnitude(field, data):
     """M{|v|}"""
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = np.zeros(3)
-    return ( (data["x-velocity"]-bulk_velocity[0])**2.0 + \
-             (data["y-velocity"]-bulk_velocity[1])**2.0 + \
-             (data["z-velocity"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
+    velocities = obtain_rv_vec(data)
+    return np.sqrt(np.sum(velocities**2,axis=0))
 add_field("VelocityMagnitude", function=_VelocityMagnitude,
           take_log=False, units=r"\rm{cm}/\rm{s}")
 
@@ -194,13 +202,6 @@
           function=_TangentialOverVelocityMagnitude,
           take_log=False)
 
-def _TangentialVelocity(field, data):
-    return np.sqrt(data["VelocityMagnitude"]**2.0
-                 - data["RadialVelocity"]**2.0)
-add_field("TangentialVelocity", 
-          function=_TangentialVelocity,
-          take_log=False, units=r"\rm{cm}/\rm{s}")
-
 def _Pressure(field, data):
     """M{(Gamma-1.0)*rho*E}"""
     return (data.pf["Gamma"] - 1.0) * \
@@ -223,14 +224,9 @@
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
       
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The spherical coordinates radius is simply the magnitude of the
-    ## coords vector.
-
-    return np.sqrt(np.sum(coords**2,axis=-1))
+    return get_sph_r(coords)
 
 def _Convert_sph_r_CGS(data):
    return data.convert("cm")
@@ -245,20 +241,9 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The angle (theta) with respect to the normal (J), is the arccos
-    ## of the dot product of the normal with the normalized coords
-    ## vector.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    JdotCoords = np.sum(J*coords,axis=-1)
-    
-    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
+    return get_sph_theta(coords, normal)
 
 add_field("sph_theta", function=_sph_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
@@ -269,54 +254,21 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
-    
-    ## We have freedom with respect to what axis (xprime) to define
-    ## the disk angle. Here I've chosen to use the axis that is
-    ## perpendicular to the normal and the y-axis. When normal ==
-    ## y-hat, then set xprime = z-hat. With this definition, when
-    ## normal == z-hat (as is typical), then xprime == x-hat.
-    ##
-    ## The angle is then given by the arctan of the ratio of the
-    ## yprime-component and the xprime-component of the coords vector.
+    coords = obtain_rvec(data)
 
-    xprime = np.cross([0.0,1.0,0.0],normal)
-    if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
-    yprime = np.cross(normal,xprime)
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
-    
-    Px = np.sum(Jx*coords,axis=-1)
-    Py = np.sum(Jy*coords,axis=-1)
-    
-    return np.arctan2(Py,Px)
+    return get_sph_phi(coords, normal)
 
 add_field("sph_phi", function=_sph_phi,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
 
-
-
 ### cylindrical coordinates: R (radius in the cylinder's plane)
 def _cyl_R(field, data):
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
       
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The cross product of the normal (J) with the coords vector
-    ## gives a vector of magnitude equal to the cylindrical radius.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    JcrossCoords = np.cross(J,coords)
-    return np.sqrt(np.sum(JcrossCoords**2,axis=-1))
+    return get_cyl_r(coords, normal)
 
 def _Convert_cyl_R_CGS(data):
    return data.convert("cm")
@@ -324,6 +276,9 @@
 add_field("cyl_R", function=_cyl_R,
          validators=[ValidateParameter("center"),ValidateParameter("normal")],
          convert_function = _Convert_cyl_R_CGS, units=r"\rm{cm}")
+add_field("cyl_RCode", function=_cyl_R,
+          validators=[ValidateParameter("center"),ValidateParameter("normal")],
+          units=r"Radius (code)")
 
 
 ### cylindrical coordinates: z (height above the cylinder's plane)
@@ -331,17 +286,9 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The dot product of the normal (J) with the coords vector gives
-    ## the cylindrical height.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    return np.sum(J*coords,axis=-1)  
+    return get_cyl_z(coords, normal)
 
 def _Convert_cyl_z_CGS(data):
    return data.convert("cm")
@@ -352,14 +299,17 @@
 
 
 ### cylindrical coordinates: theta (angle in the cylinder's plane)
-### [This is identical to the spherical coordinate's 'phi' angle.]
 def _cyl_theta(field, data):
-    return data['sph_phi']
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = obtain_rvec(data)
+
+    return get_cyl_theta(coords, normal)
 
 add_field("cyl_theta", function=_cyl_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
 
-
 ### The old field DiskAngle is the same as the spherical coordinates'
 ### 'theta' angle. I'm keeping DiskAngle for backwards compatibility.
 def _DiskAngle(field, data):
@@ -392,6 +342,54 @@
                       ValidateParameter("normal")],
           units=r"AU", display_field=False)
 
+def _cyl_RadialVelocity(field, data):
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)
+
+    theta = data['cyl_theta']
+
+    return get_cyl_r_component(velocities, theta, normal)
+
+def _cyl_RadialVelocityABS(field, data):
+    return np.abs(_cyl_RadialVelocity(field, data))
+def _Convert_cyl_RadialVelocityKMS(data):
+    return km_per_cm
+add_field("cyl_RadialVelocity", function=_cyl_RadialVelocity,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityABS", function=_cyl_RadialVelocityABS,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityKMS", function=_cyl_RadialVelocity,
+          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityKMSABS", function=_cyl_RadialVelocityABS,
+          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+
+def _cyl_TangentialVelocity(field, data):
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)
+    theta = data['cyl_theta']
+
+    return get_cyl_theta_component(velocities, theta, normal)
+
+def _cyl_TangentialVelocityABS(field, data):
+    return np.abs(_cyl_TangentialVelocity(field, data))
+def _Convert_cyl_TangentialVelocityKMS(data):
+    return km_per_cm
+add_field("cyl_TangentialVelocity", function=_cyl_TangentialVelocity,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityABS", function=_cyl_TangentialVelocityABS,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityKMS", function=_cyl_TangentialVelocity,
+          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityKMSABS", function=_cyl_TangentialVelocityABS,
+          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
 
 def _DynamicalTime(field, data):
     """
@@ -450,7 +448,7 @@
 
 # This is rho_total / rho_cr(z).
 def _Convert_Overdensity(data):
-    return 1 / (rho_crit_now * data.pf.hubble_constant**2 * 
+    return 1.0 / (rho_crit_now * data.pf.hubble_constant**2 * 
                 (1+data.pf.current_redshift)**3)
 add_field("Overdensity",function=_Matter_Density,
           convert_function=_Convert_Overdensity, units=r"")
@@ -470,8 +468,8 @@
     else:
         omega_baryon_now = 0.0441
     return data['Density'] / (omega_baryon_now * rho_crit_now * 
-                              (data.pf['CosmologyHubbleConstantNow']**2) * 
-                              ((1+data.pf['CosmologyCurrentRedshift'])**3))
+                              (data.pf.hubble_constant**2) * 
+                              ((1+data.pf.current_redshift)**3))
 add_field("Baryon_Overdensity", function=_Baryon_Overdensity, 
           units=r"")
 
@@ -640,13 +638,7 @@
           take_log=False, display_field=False)
 
 def obtain_velocities(data):
-    if data.has_field_parameter("bulk_velocity"):
-        bv = data.get_field_parameter("bulk_velocity")
-    else: bv = np.zeros(3, dtype='float64')
-    xv = data["x-velocity"] - bv[0]
-    yv = data["y-velocity"] - bv[1]
-    zv = data["z-velocity"] - bv[2]
-    return xv, yv, zv
+    return obtain_rv_vec(data)
 
 def _convertSpecificAngularMomentum(data):
     return data.convert("cm")
@@ -711,7 +703,7 @@
 #          convert_function=_convertSpecificAngularMomentum, vector_field=True,
 #          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter('center')])
 def _convertSpecificAngularMomentumKMSMPC(data):
-    return data.convert("mpc")/1e5
+    return km_per_cm*data.convert("mpc")
 #add_field("ParticleSpecificAngularMomentumKMSMPC",
 #          function=_ParticleSpecificAngularMomentum, particle_type=True,
 #          convert_function=_convertSpecificAngularMomentumKMSMPC, vector_field=True,
@@ -883,33 +875,32 @@
           display_name = "Radius (code)")
 
 def _RadialVelocity(field, data):
-    center = data.get_field_parameter("center")
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = np.zeros(3)
-    new_field = ( (data['x']-center[0])*(data["x-velocity"]-bulk_velocity[0])
-                + (data['y']-center[1])*(data["y-velocity"]-bulk_velocity[1])
-                + (data['z']-center[2])*(data["z-velocity"]-bulk_velocity[2])
-                )/data["RadiusCode"]
-    if np.any(np.isnan(new_field)): # to fix center = point
-        new_field[np.isnan(new_field)] = 0.0
-    return new_field
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)    
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_r_component(velocities, theta, phi, normal)
+
 def _RadialVelocityABS(field, data):
     return np.abs(_RadialVelocity(field, data))
 def _ConvertRadialVelocityKMS(data):
-    return 1e-5
+    return km_per_cm
 add_field("RadialVelocity", function=_RadialVelocity,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          units=r"\rm{cm}/\rm{s}")
 add_field("RadialVelocityABS", function=_RadialVelocityABS,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          units=r"\rm{cm}/\rm{s}")
 add_field("RadialVelocityKMS", function=_RadialVelocity,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
 add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
+
+def _TangentialVelocity(field, data):
+    return np.sqrt(data["VelocityMagnitude"]**2.0
+                 - data["RadialVelocity"]**2.0)
+add_field("TangentialVelocity", 
+          function=_TangentialVelocity,
+          take_log=False, units=r"\rm{cm}/\rm{s}")
 
 def _CuttingPlaneVelocityX(field, data):
     x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
@@ -1026,6 +1017,47 @@
           display_name=r"\rm{Magnetic}\/\rm{Energy}",
           units="\rm{ergs}\/\rm{cm}^{-3}")
 
+def _BPoloidal(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_theta_component(Bfields, theta, phi, normal)
+
+add_field("BPoloidal", function=_BPoloidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
+def _BToroidal(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    phi   = data['sph_phi']
+
+    return get_sph_phi_component(Bfields, phi, normal)
+
+add_field("BToroidal", function=_BToroidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
+def _BRadial(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_r_component(Bfields, theta, phi, normal)
+
+add_field("BRadial", function=_BPoloidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
 def _VorticitySquared(field, data):
     mylog.debug("Generating vorticity on %s", data)
     # We need to set up stencils
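
For reference, the field refactor above trades transposed (N, 3) coordinate
arrays reduced along axis=-1 for the (3, N) arrays that obtain_rvec now
returns, reduced along axis=0.  A minimal NumPy sketch of that equivalence,
with made-up offsets standing in for data['x'] - center[0] and friends:

import numpy as np

coords = np.random.random((3, 5)) - 0.5          # made-up (3, N) offsets from center
r_new = np.sqrt(np.sum(coords**2, axis=0))       # the new get_sph_r reduction
r_old = np.sqrt(np.sum(coords.T**2, axis=-1))    # the old transposed (N, 3) form
assert np.allclose(r_new, r_old)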


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -26,10 +26,10 @@
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
-    assert_string_equal
+    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
 
-def assert_rel_equal(a1, a2, decimels):
-    return assert_almost_equal(a1/a2, 1.0, decimels)
+def assert_rel_equal(a1, a2, decimals):
+    return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 
@@ -139,11 +139,16 @@
         ndims = [ndims, ndims, ndims]
     else:
         assert(len(ndims) == 3)
-    if negative:
-        offset = 0.5
-    else:
-        offset = 0.0
+    if not iterable(negative):
+        negative = [negative for f in fields]
+    assert(len(fields) == len(negative))
+    offsets = []
+    for n in negative:
+        if n:
+            offsets.append(0.5)
+        else:
+            offsets.append(0.0)
     data = dict((field, (np.random.random(ndims) - offset) * peak_value)
-                 for field in fields)
+                 for field,offset in zip(fields,offsets))
     ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
     return ug
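
The fake_random_pf change above accepts negative as either a single bool or a
per-field sequence.  A small self-contained sketch of the broadcast-and-offset
logic, using a plain hasattr check in place of yt's iterable helper:

fields = ("Density", "x-velocity", "y-velocity")
negative = [False, True, True]                   # or a single bool
if not hasattr(negative, "__iter__"):
    negative = [negative for f in fields]       # scalar broadcast per field
offsets = [0.5 if n else 0.0 for n in negative]
print(list(zip(fields, offsets)))
# [('Density', 0.0), ('x-velocity', 0.5), ('y-velocity', 0.5)]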


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -68,9 +68,12 @@
 def evaluate_domain_decomposition(n_d, pieces, ldom):
     """ Evaluate longest to shortest edge ratio
         BEWARE: lots of magic here """
-    ideal_bsize = 3.0 * (pieces * np.product(n_d) ** 2) ** (1.0 / 3.0)
-    bsize = int(np.sum(
-        ldom / np.array(n_d, dtype=np.float64) * np.product(n_d)))
+    eff_dim = (n_d > 1).sum()
+    ideal_bsize = eff_dim * (pieces * np.product(n_d) ** (eff_dim - 1)
+                             ) ** (1.0 / eff_dim)
+    mask = np.where(n_d > 1)
+    nd_arr = np.array(n_d, dtype=np.float64)[mask]
+    bsize = int(np.sum(ldom[mask] / nd_arr * np.product(nd_arr)))
     load_balance = float(np.product(n_d)) / \
         (float(pieces) * np.product((n_d - 1) / ldom + 1))
 
@@ -134,23 +137,15 @@
 
 
 def split_array(tab, psize):
-    """ Split array into px*py*pz subarrays using internal numpy routine. """
-    temp = [np.array_split(array, psize[1], axis=1)
-            for array in np.array_split(tab, psize[2], axis=2)]
-    temp = [item for sublist in temp for item in sublist]
-    temp = [np.array_split(array, psize[0], axis=0) for array in temp]
-    temp = [item for sublist in temp for item in sublist]
-    return temp
-
-
-if __name__ == "__main__":
-
-    NPROC = 12
-    ARRAY = np.zeros((128, 128, 129))
-    BBOX = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
-
-    PROCS = get_psize(np.array(ARRAY.shape), NPROC)
-    LE, RE, DATA = decompose_array(ARRAY, PROCS, BBOX)
-
-    for idx in range(NPROC):
-        print LE[idx, :], RE[idx, :], DATA[idx].shape
+    """ Split array into px*py*pz subarrays. """
+    n_d = np.array(tab.shape, dtype=np.int64)
+    slices = []
+    for i in range(psize[0]):
+        for j in range(psize[1]):
+            for k in range(psize[2]):
+                piece = np.array((i, j, k), dtype=np.int64)
+                lei = n_d * piece / psize
+                rei = n_d * (piece + np.ones(3, dtype=np.int64)) / psize
+                slices.append(np.s_[lei[0]:rei[0], lei[1]:
+                                    rei[1], lei[2]:rei[2]])
+    return [tab[slc] for slc in slices]
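
A hand check of the slice bounds the new split_array computes, assuming the
integer (floor) division the Python 2 code above relies on (written here with
// so it behaves the same under Python 3):

import numpy as np

n_d = np.array([7, 5, 1], dtype=np.int64)        # array shape
psize = np.array([2, 3, 1], dtype=np.int64)      # pieces per axis
piece = np.array([1, 2, 0], dtype=np.int64)      # last piece along x and y
lei = n_d * piece // psize                       # left edge index (inclusive)
rei = n_d * (piece + 1) // psize                 # right edge index (exclusive)
print(lei, rei)                                  # [3 3 0] [7 5 1] -> a (4, 2, 1) block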


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -338,3 +338,47 @@
                     rg[2,i,j,k] = zg[i,j,k] - c[2]
         return rg
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def obtain_rv_vec(data):
+    # This is just to let the pointers exist and whatnot.  We can't cdef them
+    # inside conditionals.
+    cdef np.ndarray[np.float64_t, ndim=1] vxf
+    cdef np.ndarray[np.float64_t, ndim=1] vyf
+    cdef np.ndarray[np.float64_t, ndim=1] vzf
+    cdef np.ndarray[np.float64_t, ndim=2] rvf
+    cdef np.ndarray[np.float64_t, ndim=3] vxg
+    cdef np.ndarray[np.float64_t, ndim=3] vyg
+    cdef np.ndarray[np.float64_t, ndim=3] vzg
+    cdef np.ndarray[np.float64_t, ndim=4] rvg
+    cdef np.float64_t bv[3]
+    cdef int i, j, k
+    bulk_velocity = data.get_field_parameter("bulk_velocity")
+    if bulk_velocity == None:
+        bulk_velocity = np.zeros(3)
+    bv[0] = bulk_velocity[0]; bv[1] = bulk_velocity[1]; bv[2] = bulk_velocity[2]
+    if len(data['x-velocity'].shape) == 1:
+        # One dimensional data
+        vxf = data['x-velocity'].astype("float64")
+        vyf = data['y-velocity'].astype("float64")
+        vzf = data['z-velocity'].astype("float64")
+        rvf = np.empty((3, vxf.shape[0]), 'float64')
+        for i in range(vxf.shape[0]):
+            rvf[0, i] = vxf[i] - bv[0]
+            rvf[1, i] = vyf[i] - bv[1]
+            rvf[2, i] = vzf[i] - bv[2]
+        return rvf
+    else:
+        # Three dimensional data
+        vxg = data['x-velocity'].astype("float64")
+        vyg = data['y-velocity'].astype("float64")
+        vzg = data['z-velocity'].astype("float64")
+        rvg = np.empty((3, vxg.shape[0], vxg.shape[1], vxg.shape[2]), 'float64')
+        for i in range(vxg.shape[0]):
+            for j in range(vxg.shape[1]):
+                for k in range(vxg.shape[2]):
+                    rvg[0,i,j,k] = vxg[i,j,k] - bv[0]
+                    rvg[1,i,j,k] = vyg[i,j,k] - bv[1]
+                    rvg[2,i,j,k] = vzg[i,j,k] - bv[2]
+        return rvg
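
In its one-dimensional branch, the Cython loop above amounts to a broadcast
subtraction; a pure-NumPy sketch with invented velocity data:

import numpy as np

vx, vy, vz = np.random.random((3, 4))            # hypothetical 1D velocity fields
bv = np.array([0.1, 0.0, -0.2])                  # bulk velocity
rv = np.array([vx, vy, vz]) - bv[:, None]        # shape (3, N), like rvf above
assert np.allclose(rv[1], vy - bv[1])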


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -233,49 +233,6 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def obtain_rvec(data):
-    # This is just to let the pointers exist and whatnot.  We can't cdef them
-    # inside conditionals.
-    cdef np.ndarray[np.float64_t, ndim=1] xf
-    cdef np.ndarray[np.float64_t, ndim=1] yf
-    cdef np.ndarray[np.float64_t, ndim=1] zf
-    cdef np.ndarray[np.float64_t, ndim=2] rf
-    cdef np.ndarray[np.float64_t, ndim=3] xg
-    cdef np.ndarray[np.float64_t, ndim=3] yg
-    cdef np.ndarray[np.float64_t, ndim=3] zg
-    cdef np.ndarray[np.float64_t, ndim=4] rg
-    cdef np.float64_t c[3]
-    cdef int i, j, k
-    center = data.get_field_parameter("center")
-    c[0] = center[0]; c[1] = center[1]; c[2] = center[2]
-    if len(data['x'].shape) == 1:
-        # One dimensional data
-        xf = data['x']
-        yf = data['y']
-        zf = data['z']
-        rf = np.empty((3, xf.shape[0]), 'float64')
-        for i in range(xf.shape[0]):
-            rf[0, i] = xf[i] - c[0]
-            rf[1, i] = yf[i] - c[1]
-            rf[2, i] = zf[i] - c[2]
-        return rf
-    else:
-        # Three dimensional data
-        xg = data['x']
-        yg = data['y']
-        zg = data['z']
-        rg = np.empty((3, xg.shape[0], xg.shape[1], xg.shape[2]), 'float64')
-        for i in range(xg.shape[0]):
-            for j in range(xg.shape[1]):
-                for k in range(xg.shape[2]):
-                    rg[0,i,j,k] = xg[i,j,k] - c[0]
-                    rg[1,i,j,k] = yg[i,j,k] - c[1]
-                    rg[2,i,j,k] = zg[i,j,k] - c[2]
-        return rg
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
 def kdtree_get_choices(np.ndarray[np.float64_t, ndim=3] data,
                        np.ndarray[np.float64_t, ndim=1] l_corner,
                        np.ndarray[np.float64_t, ndim=1] r_corner):


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/utilities/lib/tests/test_geometry_utils.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_geometry_utils.py
@@ -0,0 +1,30 @@
+from yt.testing import *
+from yt.utilities.lib import obtain_rvec, obtain_rv_vec
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_obtain_rvec():
+    pf = fake_random_pf(64, nprocs=8, fields=_fields, 
+           negative = [False, True, True, True])
+    
+    dd = pf.h.sphere((0.5,0.5,0.5), 0.2)
+
+    coords = obtain_rvec(dd)
+
+    r = np.sqrt(np.sum(coords*coords,axis=0))
+
+    assert_array_less(r.max(), 0.2)
+
+    assert_array_less(0.0, r.min())
+
+def test_obtain_rv_vec():
+    pf = fake_random_pf(64, nprocs=8, fields=_fields, 
+           negative = [False, True, True, True])
+
+    dd = pf.h.all_data()
+
+    vels = obtain_rv_vec(dd)
+
+    assert_array_equal(vels[0,:], dd['x-velocity'])
+    assert_array_equal(vels[1,:], dd['y-velocity'])
+    assert_array_equal(vels[2,:], dd['z-velocity'])


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -674,3 +674,191 @@
                   [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
     
     return R
+
+def get_ortho_basis(normal):
+    xprime = np.cross([0.0,1.0,0.0],normal)
+    if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
+    yprime = np.cross(normal,xprime)
+    zprime = normal
+    return (xprime, yprime, zprime)
+
+def get_sph_r(coords):
+    # The spherical coordinates radius is simply the magnitude of the
+    # coordinate vector.
+
+    return np.sqrt(np.sum(coords**2, axis=0))
+
+def resize_vector(vector,vector_array):
+    if len(vector_array.shape) == 4:
+        res_vector = np.resize(vector,(3,1,1,1))
+    else:
+        res_vector = np.resize(vector,(3,1))
+    return res_vector
+
+def get_sph_theta(coords, normal):
+    # The angle (theta) with respect to the normal (J), is the arccos
+    # of the dot product of the normal with the normalized coordinate
+    # vector.
+    
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    
+    J = np.tile(res_normal,tile_shape)
+
+    JdotCoords = np.sum(J*coords,axis=0)
+    
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=0)) )
+
+def get_sph_phi(coords, normal):
+    # We have freedom with respect to what axis (xprime) to define
+    # the disk angle. Here I've chosen to use the axis that is
+    # perpendicular to the normal and the y-axis. When normal ==
+    # y-hat, then set xprime = z-hat. With this definition, when
+    # normal == z-hat (as is typical), then xprime == x-hat.
+    #
+    # The angle is then given by the arctan of the ratio of the
+    # yprime-component and the xprime-component of the coordinate 
+    # vector.
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, coords)
+    res_yprime = resize_vector(yprime, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    Px = np.sum(Jx*coords,axis=0)
+    Py = np.sum(Jy*coords,axis=0)
+    
+    return np.arctan2(Py,Px)
+
+def get_cyl_r(coords, normal):
+    # The cross product of the normal (J) with a coordinate vector
+    # gives a vector of magnitude equal to the cylindrical radius.
+
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
+    
+    JcrossCoords = np.cross(J, coords, axisa=0, axisb=0, axisc=0)
+    return np.sqrt(np.sum(JcrossCoords**2, axis=0))
+
+def get_cyl_z(coords, normal):
+    # The dot product of the normal (J) with the coordinate vector 
+    # gives the cylindrical height.
+
+    res_normal = resize_vector(normal, coords)
+    
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
+
+    return np.sum(J*coords, axis=0)  
+
+def get_cyl_theta(coords, normal):
+    # This is identical to the spherical phi component
+
+    return get_sph_phi(coords, normal)
+
+
+def get_cyl_r_component(vectors, theta, normal):
+    # The r of a vector is the vector dotted with rhat
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    rhat = Jx*np.cos(theta) + Jy*np.sin(theta)
+
+    return np.sum(vectors*rhat,axis=0)
+
+def get_cyl_theta_component(vectors, theta, normal):
+    # The theta component of a vector is the vector dotted with thetahat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    thetahat = -Jx*np.sin(theta) + Jy*np.cos(theta)
+
+    return np.sum(vectors*thetahat, axis=0)
+
+def get_cyl_z_component(vectors, normal):
+    # The z component of a vector is the vector dotted with zhat
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    zhat = np.tile(res_zprime, tile_shape)
+
+    return np.sum(vectors*zhat, axis=0)
+
+def get_sph_r_component(vectors, theta, phi, normal):
+    # The r component of a vector is the vector dotted with rhat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
+
+    rhat = Jx*np.sin(theta)*np.cos(phi) + \
+           Jy*np.sin(theta)*np.sin(phi) + \
+           Jz*np.cos(theta)
+
+    return np.sum(vectors*rhat, axis=0)
+
+def get_sph_phi_component(vectors, phi, normal):
+    # The phi component of a vector is the vector dotted with phihat
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    phihat = -Jx*np.sin(phi) + Jy*np.cos(phi)
+
+    return np.sum(vectors*phihat, axis=0)
+
+def get_sph_theta_component(vectors, theta, phi, normal):
+    # The theta component of a vector is the vector dotted with thetahat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
+    
+    thetahat = Jx*np.cos(theta)*np.cos(phi) + \
+               Jy*np.cos(theta)*np.sin(phi) - \
+               Jz*np.sin(theta)
+
+    return np.sum(vectors*thetahat, axis=0)
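
A quick orthonormality check of the spherical unit vectors constructed above,
written straight from the formulas for the simple case normal = z-hat, where
Jx, Jy, Jz reduce to the Cartesian axes:

import numpy as np

theta, phi = 0.7, -1.2                           # arbitrary test angles
rhat     = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi),  np.cos(theta)])
thetahat = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -np.sin(theta)])
phihat   = np.array([-np.sin(phi), np.cos(phi), 0.0])
for v in (rhat, thetahat, phihat):
    assert abs(np.dot(v, v) - 1.0) < 1e-12       # each has unit length
assert abs(np.dot(rhat, thetahat)) < 1e-12       # and they are mutually orthogonal
assert abs(np.dot(rhat, phihat)) < 1e-12
assert abs(np.dot(thetahat, phihat)) < 1e-12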


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/utilities/tests/test_coordinate_conversions.py
--- /dev/null
+++ b/yt/utilities/tests/test_coordinate_conversions.py
@@ -0,0 +1,125 @@
+from yt.testing import *
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi
+
+# Randomly generated coordinates in the domain [[-1,1],[-1,1],[-1,1]]
+coords = np.array([[-0.41503037, -0.22102472, -0.55774212],
+                   [ 0.73828247, -0.17913899,  0.64076921],
+                   [ 0.08922066, -0.94254844, -0.61774511],
+                   [ 0.10173242, -0.95789145,  0.16294352],
+                   [ 0.73186508, -0.3109153 ,  0.75728738],
+                   [ 0.8757989 , -0.41475119, -0.57039201],
+                   [ 0.58040762,  0.81969082,  0.46759728],
+                   [-0.89983356, -0.9853683 , -0.38355343]]).T
+
+def test_spherical_coordinate_conversion():
+    normal = [0, 0, 1]
+    real_r =     [ 0.72950559,  0.99384957,  1.13047198,  0.97696269,  
+                   1.09807968,  1.12445067,  1.10788685,  1.38843954]
+    real_theta = [ 2.44113629,  0.87012028,  2.14891444,  1.4032274 ,  
+                   0.80979483,  2.10280198,  1.13507735,  1.85068416]
+    real_phi =   [-2.65224483, -0.23804243, -1.47641858, -1.46498842, 
+                  -0.40172325, -0.4422801 ,  0.95466734, -2.31085392]
+
+    calc_r = get_sph_r(coords)
+    calc_theta = get_sph_theta(coords, normal)
+    calc_phi = get_sph_phi(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_phi, real_phi)
+
+    normal = [1, 0, 0]
+    real_theta = [ 2.17598842,  0.73347681,  1.49179079,  1.46647589,  
+                   0.8412984 ,  0.67793705,  1.0193883 ,  2.27586987]
+    real_phi =   [-0.37729951, -2.86898397, -0.99063518, -1.73928995, 
+                   -2.75201227,-0.62870527,  2.08920872, -1.19959244]
+
+    calc_theta = get_sph_theta(coords, normal)
+    calc_phi = get_sph_phi(coords, normal)
+    
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_phi, real_phi)
+
+def test_cylindrical_coordinate_conversion():
+    normal = [0, 0, 1]
+    real_r =     [ 0.47021498,  0.75970506,  0.94676179,  0.96327853,  
+                   0.79516968,  0.96904193,  1.00437346,  1.3344104 ]    
+    real_theta = [-2.65224483, -0.23804243, -1.47641858, -1.46498842, 
+                  -0.40172325, -0.4422801 ,  0.95466734, -2.31085392]
+    real_z =     [-0.55774212,  0.64076921, -0.61774511,  0.16294352,
+                   0.75728738, -0.57039201,  0.46759728, -0.38355343]
+
+    calc_r = get_cyl_r(coords, normal)
+    calc_theta = get_cyl_theta(coords, normal)
+    calc_z = get_cyl_z(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_z, real_z)
+
+    normal = [1, 0, 0]
+    real_r =     [ 0.59994016,  0.66533898,  1.12694569,  0.97165149,
+                   0.81862843,  0.70524152,  0.94368441,  1.05738542]
+    real_theta = [-0.37729951, -2.86898397, -0.99063518, -1.73928995, 
+                  -2.75201227, -0.62870527,  2.08920872, -1.19959244]
+    real_z =     [-0.41503037,  0.73828247,  0.08922066,  0.10173242,
+                   0.73186508,  0.8757989 ,  0.58040762, -0.89983356]
+
+    calc_r = get_cyl_r(coords, normal)
+    calc_theta = get_cyl_theta(coords, normal)
+    calc_z = get_cyl_z(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_z, real_z)
+
+def test_spherical_coordinate_projections():
+    normal = [0, 0, 1]
+    theta = get_sph_theta(coords, normal)
+    phi = get_sph_phi(coords, normal)
+    zero = np.tile(0,coords.shape[1])
+
+    # Purely radial field
+    vecs = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+
+    # Purely toroidal field
+    vecs = np.array([-np.sin(phi), np.cos(phi), zero])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
+
+    # Purely poloidal field
+    vecs = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -np.sin(theta)])
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
+
+def test_cylindrical_coordinate_projections():
+    normal = [0, 0, 1]
+    theta = get_cyl_theta(coords, normal)
+    z = get_cyl_z(coords, normal)
+    zero = np.tile(0, coords.shape[1])
+
+    # Purely radial field
+    vecs = np.array([np.cos(theta), np.sin(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
+
+    # Purely toroidal field
+    vecs = np.array([-np.sin(theta), np.cos(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))
+
+    # Purely z field
+    vecs = np.array([zero, zero, z])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))


diff -r 47ed8fb04ea4a685664cd3d648ed246dc9247c82 -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 yt/utilities/tests/test_decompose.py
--- /dev/null
+++ b/yt/utilities/tests/test_decompose.py
@@ -0,0 +1,96 @@
+"""
+Test suite for cartesian domain decomposition.
+
+Author: Kacper Kowalik <xarthisius.kk@gmail.com>
+Affiliation: CA UMK
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import assert_array_equal, assert_almost_equal
+import numpy as np
+import yt.utilities.decompose as dec
+
+
+def setup():
+    pass
+
+
+def test_psize_2d():
+    procs = dec.get_psize(np.array([5, 1, 7]), 6)
+    assert_array_equal(procs, np.array([3, 1, 2]))
+    procs = dec.get_psize(np.array([1, 7, 5]), 6)
+    assert_array_equal(procs, np.array([1, 2, 3]))
+    procs = dec.get_psize(np.array([7, 5, 1]), 6)
+    assert_array_equal(procs, np.array([2, 3, 1]))
+
+
+def test_psize_3d():
+    procs = dec.get_psize(np.array([33, 35, 37]), 12)
+    assert_array_equal(procs, np.array([3, 2, 2]))
+
+
+def test_decomposition_2d():
+    array = np.ones((7, 5, 1))
+    bbox = np.array([[-0.7, 0.0], [1.5, 2.0], [0.0, 0.7]])
+    ledge, redge, data = dec.decompose_array(array, np.array([2, 3, 1]), bbox)
+
+    assert_array_equal(data[1].shape, np.array([3, 2, 1]))
+
+    gold_le = np.array([
+                       [-0.7, 1.5, 0.0], [-0.7, 1.6, 0.0],
+                       [-0.7, 1.8, 0.0], [-0.4, 1.5, 0.0],
+                       [-0.4, 1.6, 0.0], [-0.4, 1.8, 0.0]
+                       ])
+    assert_almost_equal(ledge, gold_le, 8)
+
+    gold_re = np.array(
+        [[-0.4, 1.6, 0.7], [-0.4, 1.8, 0.7],
+         [-0.4, 2.0, 0.7], [0.0, 1.6, 0.7],
+         [0.0, 1.8, 0.7], [0.0, 2.0, 0.7]]
+    )
+    assert_almost_equal(redge, gold_re, 8)
+
+
+def test_decomposition_3d():
+    array = np.ones((33, 35, 37))
+    bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+
+    ledge, redge, data = dec.decompose_array(array, np.array([3, 2, 2]), bbox)
+    assert_array_equal(data[0].shape, np.array([11, 17, 18]))
+
+    gold_le = np.array(
+        [[0.00000, -1.50000, 1.00000], [0.00000, -1.50000, 1.72973],
+         [0.00000, -0.04286, 1.00000], [0.00000, -0.04286, 1.72973],
+         [0.33333, -1.50000, 1.00000], [0.33333, -1.50000, 1.72973],
+         [0.33333, -0.04286, 1.00000], [0.33333, -0.04286, 1.72973],
+         [0.66667, -1.50000, 1.00000], [0.66667, -1.50000, 1.72973],
+         [0.66667, -0.04286, 1.00000], [0.66667, -0.04286, 1.72973]]
+    )
+    assert_almost_equal(ledge, gold_le, 5)
+
+    gold_re = np.array(
+        [[0.33333, -0.04286, 1.72973], [0.33333, -0.04286, 2.50000],
+         [0.33333, 1.50000, 1.72973], [0.33333, 1.50000, 2.50000],
+         [0.66667, -0.04286, 1.72973], [0.66667, -0.04286, 2.50000],
+         [0.66667, 1.50000, 1.72973], [0.66667, 1.50000, 2.50000],
+         [1.00000, -0.04286, 1.72973], [1.00000, -0.04286, 2.50000],
+         [1.00000, 1.50000, 1.72973], [1.00000, 1.50000, 2.50000]]
+    )
+    assert_almost_equal(redge, gold_re, 5)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/6bbad7ce654e/
changeset:   6bbad7ce654e
branch:      yt
user:        MatthewTurk
date:        2012-10-19 22:42:04
summary:     Adding simple tests for cut_region and extract_region.
affected #:  1 file

diff -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 -r 6bbad7ce654ec4f2cdd21c5262160645c2255aa5 yt/data_objects/tests/test_extract_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -0,0 +1,53 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_cut_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        r = dd.cut_region( [ "grid['Temperature'] > 0.5",
+                             "grid['Density'] < 0.75",
+                             "grid['x-velocity'] > 0.25" ])
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        t2 = (r["Temperature"] < 0.75)
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+
+def test_extract_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        r = dd.extract_region(t)
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        t2 = (r["Temperature"] < 0.75)
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+        t3 = (r["Temperature"] < 0.75)
+        r3 = r.extract_region( t3 )
+        yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
+        yield assert_equal, np.all(r3["Temperature"] < 0.75), True
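
The equivalence these tests assert, reduced to plain NumPy: the string
conditions handed to cut_region should select exactly the cells picked out by
the corresponding boolean mask.  A standalone sketch:

import numpy as np

temp = np.random.random(100)                     # stand-ins for Temperature
dens = np.random.random(100)                     # and Density
mask = (temp > 0.5) & (dens < 0.75)              # the hand-built cut
assert np.all(temp[mask] > 0.5)
assert np.all(dens[mask] < 0.75)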



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2bc7ed50b2bb/
changeset:   2bc7ed50b2bb
branch:      yt
user:        MatthewTurk
date:        2012-10-20 00:59:17
summary:     Adding a catch for the case where no filenames are found in a
call to TimeSeriesData.from_filenames.  Only applies if TimeSeriesData is
called with a glob-matching pattern.  Closes #454.
affected #:  2 files

diff -r 6bbad7ce654ec4f2cdd21c5262160645c2255aa5 -r 2bc7ed50b2bb9533873967c29c095f0852c0e1b5 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -258,8 +258,11 @@
 
         """
         if isinstance(filenames, types.StringTypes):
+            pattern = filenames
             filenames = glob.glob(filenames)
             filenames.sort()
+            if len(filenames) == 0:
+                raise YTNoFilenamesMatchPattern(pattern)
         obj = cls(filenames[:], parallel = parallel)
         return obj
 


diff -r 6bbad7ce654ec4f2cdd21c5262160645c2255aa5 -r 2bc7ed50b2bb9533873967c29c095f0852c0e1b5 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -146,3 +146,11 @@
     def __str__(self):
         return "You must create an API key before uploading.  See " + \
                "https://data.yt-project.org/getting_started.html"
+
+class YTNoFilenamesMatchPattern(YTException):
+    def __init__(self, pattern):
+        self.pattern = pattern
+
+    def __str__(self):
+        return "No filenames were found to match the pattern: " + \
+               "'%s'" % (self.pattern)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3ffebbf3e5be/
changeset:   3ffebbf3e5be
branch:      yt-3.0
user:        MatthewTurk
date:        2012-10-20 14:25:39
summary:     Merging from tip of mainline development
affected #:  14 files

diff -r 33a1976b9deef39ea2844784c78cdd03ef798af4 -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -559,16 +559,16 @@
                     continue
             else:
                 nz_filter = None
-            mins.append(data[field][nz_filter].min())
-            maxs.append(data[field][nz_filter].max())
+            mins.append(np.nanmin(data[field][nz_filter]))
+            maxs.append(np.nanmax(data[field][nz_filter]))
         else:
             if this_filter.any():
                 if non_zero:
                     nz_filter = ((this_filter) &
                                  (data[field][this_filter] > 0.0))
                 else: nz_filter = this_filter
-                mins.append(data[field][nz_filter].min())
-                maxs.append(data[field][nz_filter].max())
+                mins.append(np.nanmin(data[field][nz_filter]))
+                maxs.append(np.nanmax(data[field][nz_filter]))
             else:
                 mins.append(1e90)
                 maxs.append(-1e90)
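
The motivation for switching to nanmin/nanmax: a single NaN poisons the plain
ndarray reductions, while the nan-aware versions skip it.  For example:

import numpy as np

vals = np.array([0.2, np.nan, 0.9])
print(vals.min(), vals.max())                    # nan nan
print(np.nanmin(vals), np.nanmax(vals))          # 0.2 0.9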


diff -r 33a1976b9deef39ea2844784c78cdd03ef798af4 -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -173,7 +173,8 @@
             # required attrs
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
-                pf.hubble_constant = pf.cosmological_simulation = 0.0
+                pf.cosmological_simulation = 0.0
+            pf.hubble_constant = 0.7
             pf.domain_left_edge = np.zeros(3, 'float64')
             pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3
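
The reason the fake parameter file now gets hubble_constant = 0.7 rather than
0.0: conversion functions such as _Convert_Overdensity divide by h**2, so a
zero value blows up.  A toy illustration (the rho_crit_now value here is
illustrative only):

rho_crit_now = 1.8788e-29                        # g/cm^3, illustrative value
h, z = 0.7, 0.0
conv = 1.0 / (rho_crit_now * h**2 * (1 + z)**3)  # finite; h = 0.0 would divide by zero
print(conv)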


diff -r 33a1976b9deef39ea2844784c78cdd03ef798af4 -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d yt/data_objects/tests/test_derived_quantities.py
--- /dev/null
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -0,0 +1,24 @@
+from yt.testing import *
+import numpy as np
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_extrema():
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(16, nprocs = nprocs, fields = ("Density",
+                "x-velocity", "y-velocity", "z-velocity"))
+        sp = pf.h.sphere("c", (0.25, '1'))
+        (mi, ma), = sp.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(sp["Density"])
+        yield assert_equal, ma, np.nanmax(sp["Density"])
+        dd = pf.h.all_data()
+        (mi, ma), = dd.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(dd["Density"])
+        yield assert_equal, ma, np.nanmax(dd["Density"])
+        sp = pf.h.sphere("max", (0.25, '1'))
+        yield assert_equal, np.any(np.isnan(sp["RadialVelocity"])), True
+        (mi, ma), = dd.quantities["Extrema"]("RadialVelocity")
+        yield assert_equal, mi, np.nanmin(dd["RadialVelocity"])
+        yield assert_equal, ma, np.nanmax(dd["RadialVelocity"])


diff -r 33a1976b9deef39ea2844784c78cdd03ef798af4 -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d yt/data_objects/tests/test_extract_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -0,0 +1,53 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_cut_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        r = dd.cut_region( [ "grid['Temperature'] > 0.5",
+                             "grid['Density'] < 0.75",
+                             "grid['x-velocity'] > 0.25" ])
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        t2 = (r["Temperature"] < 0.75)
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+
+def test_extract_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        r = dd.extract_region(t)
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        t2 = (r["Temperature"] < 0.75)
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+        t3 = (r["Temperature"] < 0.75)
+        r3 = r.extract_region( t3 )
+        yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
+        yield assert_equal, np.all(r3["Temperature"] < 0.75), True


diff -r 33a1976b9deef39ea2844784c78cdd03ef798af4 -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d yt/data_objects/tests/test_fields.py
--- /dev/null
+++ b/yt/data_objects/tests/test_fields.py
@@ -0,0 +1,91 @@
+from yt.testing import *
+import numpy as np
+from yt.data_objects.field_info_container import \
+    FieldInfo
+import yt.data_objects.universal_fields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    np.seterr(all = 'ignore')
+
+_sample_parameters = dict(
+    axis = 0,
+    center = np.array((0.0, 0.0, 0.0)),
+    bulk_velocity = np.array((0.0, 0.0, 0.0)),
+    normal = np.array((0.0, 0.0, 1.0)),
+    cp_x_vec = np.array((1.0, 0.0, 0.0)),
+    cp_y_vec = np.array((0.0, 1.0, 0.0)),
+    cp_z_vec = np.array((0.0, 0.0, 1.0)),
+)
+
+_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
+
+def realistic_pf(fields, nprocs):
+    pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
+    pf.parameters["HydroMethod"] = "streaming"
+    pf.parameters["Gamma"] = 5.0/3.0
+    pf.parameters["EOSType"] = 1.0
+    pf.parameters["EOSSoundSpeed"] = 1.0
+    pf.conversion_factors["Time"] = 1.0
+    pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
+    pf.current_redshift = 0.0001
+    pf.hubble_constant = 0.7
+    for unit in mpc_conversion:
+        pf.units[unit+'h'] = pf.units[unit]
+        pf.units[unit+'cm'] = pf.units[unit]
+        pf.units[unit+'hcm'] = pf.units[unit]
+    return pf
+
+class TestFieldAccess(object):
+    description = None
+
+    def __init__(self, field_name, nproc):
+        # Note this should be a field name
+        self.field_name = field_name
+        self.description = "Accessing_%s_%s" % (field_name, nproc)
+        self.nproc = nproc
+
+    def __call__(self):
+        field = FieldInfo[self.field_name]
+        deps = field.get_dependencies()
+        fields = deps.requested + _base_fields
+        skip_grids = False
+        needs_spatial = False
+        for v in field.validators:
+            f = getattr(v, "fields", None)
+            if f: fields += f
+            if getattr(v, "ghost_zones", 0) > 0:
+                skip_grids = True
+            if hasattr(v, "ghost_zones"):
+                needs_spatial = True
+        pf = realistic_pf(fields, self.nproc)
+        # This gives unequal sized grids as well as subgrids
+        dd1 = pf.h.all_data()
+        dd2 = pf.h.all_data()
+        dd1.field_parameters.update(_sample_parameters)
+        dd2.field_parameters.update(_sample_parameters)
+        v1 = dd1[self.field_name]
+        conv = field._convert_function(dd1) or 1.0
+        if not needs_spatial:
+            assert_equal(v1, conv*field._function(field, dd2))
+        if not skip_grids:
+            for g in pf.h.grids:
+                g.field_parameters.update(_sample_parameters)
+                conv = field._convert_function(g) or 1.0
+                v1 = g[self.field_name]
+                g.clear_data()
+                g.field_parameters.update(_sample_parameters)
+                assert_equal(v1, conv*field._function(field, g))
+
+def test_all_fields():
+    for field in FieldInfo:
+        if field.startswith("CuttingPlane"): continue
+        if field.startswith("particle"): continue
+        if field.startswith("CIC"): continue
+        if field.startswith("WeakLensingConvergence"): continue
+        if FieldInfo[field].particle_type: continue
+        for nproc in [1, 4, 8]:
+            yield TestFieldAccess(field, nproc)


diff -r 33a1976b9deef39ea2844784c78cdd03ef798af4 -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d yt/data_objects/tests/test_ortho_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -0,0 +1,25 @@
+from yt.testing import *
+
+def test_ortho_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
+
+    axes = ['x', 'y', 'z']
+    for ax, an in enumerate(axes):
+        ocoord = np.random.random(2)
+
+        my_oray = pf.h.ortho_ray(ax, ocoord)
+
+        my_axes = range(3)
+        del my_axes[ax]
+
+        # find the cells intersected by the ortho ray
+        my_all = pf.h.all_data()
+        my_cells = (np.abs(my_all[axes[my_axes[0]]] - ocoord[0]) <= 
+                    0.5 * dx[my_axes[0]]) & \
+                   (np.abs(my_all[axes[my_axes[1]]] - ocoord[1]) <= 
+                    0.5 * dx[my_axes[1]])
+
+        assert_equal(my_oray['Density'].sum(),
+                     my_all['Density'][my_cells].sum())


diff -r 33a1976b9deef39ea2844784c78cdd03ef798af4 -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d yt/data_objects/tests/test_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_rays.py
@@ -0,0 +1,31 @@
+from yt.testing import *
+
+def test_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+      pf.domain_dimensions
+
+    p1 = np.random.random(3)
+    p2 = np.random.random(3)
+
+    my_ray = pf.h.ray(p1, p2)
+    assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
+    ray_cells = my_ray['dts'] > 0
+
+    # find cells intersected by the ray
+    my_all = pf.h.all_data()
+    
+    dt = np.abs(dx / (p2 - p1))
+    tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
+                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
+                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
+    tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
+                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
+                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
+    tin = tin.max(axis=0)
+    tout = tout.min(axis=0)
+    my_cells = (tin < tout) & (tin < 1) & (tout > 0)
+
+    assert_rel_equal(ray_cells.sum(), my_cells.sum(), 14)
+    assert_rel_equal(my_ray['Density'][ray_cells].sum(),
+                     my_all['Density'][my_cells].sum(), 14)
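
The tin/tout construction in this test is the standard slab test for ray-cell
intersection; for a single cell it reduces to the following self-contained
sketch:

import numpy as np

p1, p2 = np.zeros(3), np.ones(3)                 # ray from corner to corner
lo, hi = np.array([0.4]*3), np.array([0.6]*3)    # one cell's bounds
t1 = (lo - p1) / (p2 - p1)
t2 = (hi - p1) / (p2 - p1)
tin, tout = np.minimum(t1, t2).max(), np.maximum(t1, t2).min()
print((tin < tout) and (tin < 1) and (tout > 0)) # True: the diagonal crosses it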


diff -r 33a1976b9deef39ea2844784c78cdd03ef798af4 -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -258,8 +258,11 @@
 
         """
         if isinstance(filenames, types.StringTypes):
+            pattern = filenames
             filenames = glob.glob(filenames)
             filenames.sort()
+            if len(filenames) == 0:
+                raise YTNoFilenamesMatchPattern(pattern)
         obj = cls(filenames[:], parallel = parallel)
         return obj
 


diff -r 33a1976b9deef39ea2844784c78cdd03ef798af4 -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -219,9 +219,9 @@
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
       
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
-    return get_sph_r(vectors, center)
+    return get_sph_r(coords)
 
 def _Convert_sph_r_CGS(data):
    return data.convert("cm")
@@ -236,7 +236,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
     return get_sph_theta(coords, normal)
 
@@ -249,7 +249,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
     return get_sph_phi(coords, normal)
 
@@ -261,7 +261,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
       
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
     return get_cyl_r(coords, normal)
 
@@ -281,7 +281,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
     return get_cyl_z(coords, normal)
 
@@ -298,7 +298,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = obtain_rvec(data).transpose()
+    coords = obtain_rvec(data)
 
     return get_cyl_theta(coords, normal)
 
@@ -339,9 +339,9 @@
 
 def _cyl_RadialVelocity(field, data):
     normal = data.get_field_parameter("normal")
-    velocities = obtain_rv_vec(data).transpose()
+    velocities = obtain_rv_vec(data)
 
-    theta = np.tile(data['cyl_theta'], (3, 1)).transpose()
+    theta = data['cyl_theta']
 
     return get_cyl_r_component(velocities, theta, normal)
 
@@ -364,8 +364,8 @@
 
 def _cyl_TangentialVelocity(field, data):
     normal = data.get_field_parameter("normal")
-    velocities = obtain_rv_vec(data).transpose()
-    theta = np.tile(data['cyl_theta'], (3, 1)).transpose()
+    velocities = obtain_rv_vec(data)
+    theta = data['cyl_theta']
 
     return get_cyl_theta_component(velocities, theta, normal)
 
@@ -443,7 +443,7 @@
 
 # This is rho_total / rho_cr(z).
 def _Convert_Overdensity(data):
-    return 1 / (rho_crit_now * data.pf.hubble_constant**2 * 
+    return 1.0 / (rho_crit_now * data.pf.hubble_constant**2 * 
                 (1+data.pf.current_redshift)**3)
 add_field("Overdensity",function=_Matter_Density,
           convert_function=_Convert_Overdensity, units=r"")
@@ -463,8 +463,8 @@
     else:
         omega_baryon_now = 0.0441
     return data['Density'] / (omega_baryon_now * rho_crit_now * 
-                              (data.pf['CosmologyHubbleConstantNow']**2) * 
-                              ((1+data.pf['CosmologyCurrentRedshift'])**3))
+                              (data.pf.hubble_constant**2) * 
+                              ((1+data.pf.current_redshift)**3))
 add_field("Baryon_Overdensity", function=_Baryon_Overdensity, 
           units=r"")
 
@@ -871,9 +871,9 @@
 
 def _RadialVelocity(field, data):
     normal = data.get_field_parameter("normal")
-    velocities = obtain_rv_vec(data).transpose()    
-    theta = np.tile(data['sph_theta'], (3, 1)).transpose()
-    phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
+    velocities = obtain_rv_vec(data)    
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
 
     return get_sph_r_component(velocities, theta, phi, normal)
 
@@ -1017,8 +1017,8 @@
 
     Bfields = np.array([data['Bx'], data['By'], data['Bz']])
 
-    theta = np.tile(data['sph_theta'], (3, 1)).transpose()
-    phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
 
     return get_sph_theta_component(Bfields, theta, phi, normal)
 
@@ -1031,7 +1031,7 @@
 
     Bfields = np.array([data['Bx'], data['By'], data['Bz']])
 
-    phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
+    phi   = data['sph_phi']
 
     return get_sph_phi_component(Bfields, phi, normal)
 
@@ -1044,8 +1044,8 @@
 
     Bfields = np.array([data['Bx'], data['By'], data['Bz']])
 
-    theta = np.tile(data['sph_theta'], (3, 1)).transpose()
-    phi   = np.tile(data['sph_phi'], (3, 1)).transpose()
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
 
     return get_sph_r_component(Bfields, theta, phi, normal)
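
All of the hunks above follow from one convention change: obtain_rvec and
obtain_rv_vec now return component-first arrays of shape (3, N) -- or
(3, nx, ny, nz) for gridded data -- so a per-point angle array broadcasts
against them directly and the old np.tile/transpose bookkeeping can be
dropped.  A minimal sketch of that broadcasting, with the shapes as the only
assumption:

    import numpy as np
    vectors = np.random.random((3, 10))  # one column of components per point
    theta = np.random.random(10)         # one angle per point
    # each of the three component rows is scaled by the same angle array
    radial = (vectors * np.cos(theta)).sum(axis=0)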
 


diff -r 33a1976b9deef39ea2844784c78cdd03ef798af4 -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -26,10 +26,10 @@
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
-    assert_array_less, assert_string_equal
+    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
 
-def assert_rel_equal(a1, a2, decimels):
-    return assert_almost_equal(a1/a2, 1.0, decimels)
+def assert_rel_equal(a1, a2, decimals):
+    return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 


diff -r 33a1976b9deef39ea2844784c78cdd03ef798af4 -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -91,6 +91,30 @@
     def __str__(self):
         return "Simulation time-series type %s not defined." % self.sim_type
 
+class YTCannotParseFieldDisplayName(YTException):
+    def __init__(self, field_name, display_name, mathtext_error):
+        self.field_name = field_name
+        self.display_name = display_name
+        self.mathtext_error = mathtext_error
+
+    def __str__(self):
+        return ("The display name \"%s\" "
+                "of the derived field %s " 
+                "contains the following LaTeX parser errors:\n" ) \
+                % (self.display_name, self.field_name) + self.mathtext_error
+
+class YTCannotParseUnitDisplayName(YTException):
+    def __init__(self, field_name, display_unit, mathtext_error):
+        self.field_name = field_name
+        self.unit_name = display_unit
+        self.mathtext_error = mathtext_error
+
+    def __str__(self):
+        return ("The unit display name \"%s\" "
+                "of the derived field %s " 
+                "contains the following LaTeX parser errors:\n" ) \
+            % (self.unit_name, self.field_name) + self.mathtext_error
+
 class AmbiguousOutputs(YTException):
     def __init__(self, pf):
         YTException.__init__(self, pf)
@@ -149,6 +173,14 @@
         return "You must create an API key before uploading.  See " + \
                "https://data.yt-project.org/getting_started.html"
 
+class YTNoFilenamesMatchPattern(YTException):
+    def __init__(self, pattern):
+        self.pattern = pattern
+
+    def __str__(self):
+        return "No filenames were found to match the pattern: " + \
+               "'%s'" % (self.pattern)
+
 class YTNoOldAnswer(YTException):
     def __init__(self, path):
         self.path = path
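
A sketch of how the new exception is meant to be raised; the glob-based
caller here is hypothetical, only the exception class comes from the hunk
above:

    import glob
    from yt.utilities.exceptions import YTNoFilenamesMatchPattern

    pattern = "DD????/DD????"  # hypothetical time-series glob
    if not glob.glob(pattern):
        raise YTNoFilenamesMatchPattern(pattern)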


diff -r 33a1976b9deef39ea2844784c78cdd03ef798af4 -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -686,20 +686,29 @@
     # The spherical coordinates radius is simply the magnitude of the
     # coordinate vector.
 
-    return np.sqrt(np.sum(coords**2, axis=-1))
+    return np.sqrt(np.sum(coords**2, axis=0))
 
+def resize_vector(vector,vector_array):
+    if len(vector_array.shape) == 4:
+        res_vector = np.resize(vector,(3,1,1,1))
+    else:
+        res_vector = np.resize(vector,(3,1))
+    return res_vector
 
 def get_sph_theta(coords, normal):
     # The angle (theta) with respect to the normal (J), is the arccos
     # of the dot product of the normal with the normalized coordinate
     # vector.
     
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
+    res_normal = resize_vector(normal, coords)
 
-    JdotCoords = np.sum(J*coords,axis=-1)
+    tile_shape = [1] + list(coords.shape)[1:]
     
-    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
+    J = np.tile(res_normal,tile_shape)
+
+    JdotCoords = np.sum(J*coords,axis=0)
+    
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=0)) )
 
 def get_sph_phi(coords, normal):
     # We have freedom with respect to what axis (xprime) to define
@@ -713,13 +722,16 @@
     # vector.
 
     (xprime, yprime, zprime) = get_ortho_basis(normal)
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
 
-    Px = np.sum(Jx*coords,axis=-1)
-    Py = np.sum(Jy*coords,axis=-1)
+    res_xprime = resize_vector(xprime, coords)
+    res_yprime = resize_vector(yprime, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    Px = np.sum(Jx*coords,axis=0)
+    Py = np.sum(Jy*coords,axis=0)
     
     return np.arctan2(Py,Px)
 
@@ -727,20 +739,24 @@
     # The cross product of the normal (J) with a coordinate vector
     # gives a vector of magnitude equal to the cylindrical radius.
 
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal, tile_shape)
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
     
-    JcrossCoords = np.cross(J, coords)
-    return np.sqrt(np.sum(JcrossCoords**2, axis=-1))
+    JcrossCoords = np.cross(J, coords, axisa=0, axisb=0, axisc=0)
+    return np.sqrt(np.sum(JcrossCoords**2, axis=0))
 
 def get_cyl_z(coords, normal):
     # The dot product of the normal (J) with the coordinate vector 
     # gives the cylindrical height.
+
+    res_normal = resize_vector(normal, coords)
     
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal, tile_shape)
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
 
-    return np.sum(J*coords, axis=-1)  
+    return np.sum(J*coords, axis=0)  
 
 def get_cyl_theta(coords, normal):
     # This is identical to the spherical phi component
@@ -753,77 +769,96 @@
 
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
 
     rhat = Jx*np.cos(theta) + Jy*np.sin(theta)
 
-    return np.sum(vectors*rhat,axis=-1)
+    return np.sum(vectors*rhat,axis=0)
 
 def get_cyl_theta_component(vectors, theta, normal):
     # The theta component of a vector is the vector dotted with thetahat
     
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
 
     thetahat = -Jx*np.sin(theta) + Jy*np.cos(theta)
 
-    return np.sum(vectors*thetahat, axis=-1)
+    return np.sum(vectors*thetahat, axis=0)
 
 def get_cyl_z_component(vectors, normal):
     # The z component of a vector is the vector dotted with zhat
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    zhat = np.tile(zprime, tile_shape)
+    res_zprime = resize_vector(zprime, vectors)
 
-    return np.sum(vectors*zhat, axis=-1)
+    tile_shape = [1] + list(vectors.shape)[1:]
+    zhat = np.tile(res_zprime, tile_shape)
+
+    return np.sum(vectors*zhat, axis=0)
 
 def get_sph_r_component(vectors, theta, phi, normal):
     # The r component of a vector is the vector dotted with rhat
     
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
-    Jz = np.tile(zprime,tile_shape)
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
 
     rhat = Jx*np.sin(theta)*np.cos(phi) + \
            Jy*np.sin(theta)*np.sin(phi) + \
            Jz*np.cos(theta)
 
-    return np.sum(vectors*rhat, axis=-1)
+    return np.sum(vectors*rhat, axis=0)
 
 def get_sph_phi_component(vectors, phi, normal):
     # The phi component of a vector is the vector dotted with phihat
 
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
 
     phihat = -Jx*np.sin(phi) + Jy*np.cos(phi)
 
-    return np.sum(vectors*phihat, axis=-1)
+    return np.sum(vectors*phihat, axis=0)
 
 def get_sph_theta_component(vectors, theta, phi, normal):
     # The theta component of a vector is the vector dotted with thetahat
     
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
-    tile_shape = list(vectors.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
-    Jz = np.tile(zprime,tile_shape)
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
     
     thetahat = Jx*np.cos(theta)*np.cos(phi) + \
                Jy*np.cos(theta)*np.sin(phi) - \
                Jz*np.sin(theta)
 
-    return np.sum(vectors*thetahat, axis=-1)
+    return np.sum(vectors*thetahat, axis=0)
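
resize_vector exists so a bare 3-vector can be tiled out to the new
component-first coordinate shape.  A minimal sketch of the get_cyl_z pattern
above, assuming only the (3, N) layout:

    import numpy as np
    normal = np.array([0.0, 0.0, 1.0])
    coords = np.random.random((3, 100))     # component-first points
    res_normal = np.resize(normal, (3, 1))  # what resize_vector returns here
    J = np.tile(res_normal, [1] + list(coords.shape)[1:])  # -> (3, 100)
    height = np.sum(J * coords, axis=0)     # one dot product per point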


diff -r 33a1976b9deef39ea2844784c78cdd03ef798af4 -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d yt/utilities/tests/test_coordinate_conversions.py
--- a/yt/utilities/tests/test_coordinate_conversions.py
+++ b/yt/utilities/tests/test_coordinate_conversions.py
@@ -18,7 +18,7 @@
                    [ 0.73186508, -0.3109153 ,  0.75728738],
                    [ 0.8757989 , -0.41475119, -0.57039201],
                    [ 0.58040762,  0.81969082,  0.46759728],
-                   [-0.89983356, -0.9853683 , -0.38355343]])
+                   [-0.89983356, -0.9853683 , -0.38355343]]).T
 
 def test_spherical_coordinate_conversion():
     normal = [0, 0, 1]
@@ -85,44 +85,41 @@
 def test_spherical_coordinate_projections():
     normal = [0, 0, 1]
     theta = get_sph_theta(coords, normal)
-    theta_pass = np.tile(theta, (3, 1)).T
     phi = get_sph_phi(coords, normal)
-    phi_pass = np.tile(phi, (3, 1)).T
-    zero = np.tile(0,coords.shape[0])
+    zero = np.tile(0,coords.shape[1])
 
     # Purely radial field
-    vecs = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)]).T
-    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta_pass, phi_pass, normal))
-    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi_pass, normal))
+    vecs = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
 
     # Purely toroidal field
-    vecs = np.array([-np.sin(phi), np.cos(phi), zero]).T
-    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta_pass, phi_pass, normal))
-    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta_pass, phi_pass, normal))
+    vecs = np.array([-np.sin(phi), np.cos(phi), zero])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
 
     # Purely poloidal field
-    vecs = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -np.sin(theta)]).T
-    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi_pass, normal))
-    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta_pass, phi_pass, normal))
+    vecs = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -np.sin(theta)])
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
 
 def test_cylindrical_coordinate_projections():
     normal = [0, 0, 1]
     theta = get_cyl_theta(coords, normal)
-    theta_pass = np.tile(theta, (3, 1)).T
     z = get_cyl_z(coords, normal)
-    zero = np.tile(0, coords.shape[0])
+    zero = np.tile(0, coords.shape[1])
 
     # Purely radial field
-    vecs = np.array([np.cos(theta), np.sin(theta), zero]).T
-    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta_pass, normal))
+    vecs = np.array([np.cos(theta), np.sin(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
     assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
 
     # Purely toroidal field
-    vecs = np.array([-np.sin(theta), np.cos(theta), zero]).T
+    vecs = np.array([-np.sin(theta), np.cos(theta), zero])
     assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
-    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta_pass, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))
 
     # Purely z field
-    vecs = np.array([zero, zero, z]).T
-    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta_pass, normal))
-    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta_pass, normal))
+    vecs = np.array([zero, zero, z])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))


diff -r 33a1976b9deef39ea2844784c78cdd03ef798af4 -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -146,7 +146,8 @@
     @parallel_passthrough
     def _finalize_parallel(self,data):
         self.streamlines = self.comm.mpi_allreduce(self.streamlines, op='sum')
-        self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
+        if self.get_magnitude:
+            self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
         
     def _integrate_through_brick(self, node, stream, step,
                                  periodic=False, mag=None):
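
The guard matters because self.magnitudes is only allocated when magnitudes
were requested; reducing an unset attribute would fail on every processor.
A hedged usage sketch (the dataset and seed positions are placeholders):

    streamlines = Streamlines(pf, positions, get_magnitude=True)
    streamlines.integrate_through_volume()
    mags = streamlines.magnitudes  # summed across processors by the allreduce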



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d40f13b34e9f/
changeset:   d40f13b34e9f
branch:      yt-3.0
user:        MatthewTurk
date:        2012-10-20 15:36:20
summary:     Continuing with the coordinate handling: moving Cartesian fields, ensuring all
static outputs have a coordinate handler, and fleshing out the coordinate handler.
Many, many tests fail.
affected #:  9 files

diff -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d -r d40f13b34e9fa7daf61dad0e512fe74d2328982b setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -7,5 +7,5 @@
 where=yt
 exclude=answer_testing
 with-xunit=1
-with-answer-testing=1
-answer-compare=gold001
+#with-answer-testing=1
+#answer-compare=gold001


diff -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d -r d40f13b34e9fa7daf61dad0e512fe74d2328982b yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -40,6 +40,11 @@
 from yt.utilities.minimal_representation import \
     MinimalStaticOutput
 
+from yt.geometry.coordinate_handler import \
+    CartesianCoordinateHandler, \
+    PolarCoordinateHandler, \
+    CylindricalCoordinateHandler
+
 # We want to support the movie format in the future.
 # When such a thing comes to pass, I'll move all the stuff that is contant up
 # to here, and then have it instantiate EnzoStaticOutputs as appropriate.
@@ -53,6 +58,7 @@
     fluid_types = ("gas",)
     particle_types = ("all",)
     geometry = "cartesian"
+    coordinates = None
 
     class __metaclass__(type):
         def __init__(cls, name, b, d):
@@ -96,6 +102,7 @@
         self.min_level = 0
 
         self._parse_parameter_file()
+        self._setup_coordinate_handler()
         self._set_units()
         self._set_derived_attrs()
 
@@ -223,9 +230,20 @@
             # away the exising field_info.
             self.field_info = FieldInfoContainer.create_with_fallback(
                                 self._fieldinfo_fallback)
+            self.field_info.update(self.coordinates.coordinate_fields())
         if getattr(self, "field_dependencies", None) is None:
             self.field_dependencies = {}
 
+    def _setup_coordinate_handler(self):
+        if self.geometry == "cartesian":
+            self.coordinates = CartesianCoordinateHandler(self)
+        elif self.geometry == "cylindrical":
+            self.coordinates = CylindricalCoordinateHandler(self)
+        elif self.geometry == "polar":
+            self.coordinates = PolarCoordinateHandler(self)
+        else:
+            raise YTGeometryNotSupported(self.geometry)
+
 def _reconstruct_pf(*args, **kwargs):
     pfs = ParameterFileStore()
     pf = pfs.get_pf_hash(*args)
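
With this change a frontend only has to set self.geometry before
StaticOutput.__init__ finishes; the handler and its coordinate fields follow
automatically.  A sketch of the flow, with the attribute access as the only
assumption:

    pf.geometry = "cylindrical"         # set in _parse_parameter_file
    pf._setup_coordinate_handler()      # picks CylindricalCoordinateHandler
    pf.coordinates.coordinate_fields()  # -> CylindricalFieldInfo, merged into
                                        #    field_info where it is created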


diff -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d -r d40f13b34e9fa7daf61dad0e512fe74d2328982b yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -68,48 +68,6 @@
     get_cyl_z, get_sph_r, \
     get_sph_theta, get_sph_phi
      
-# Note that, despite my newfound efforts to comply with PEP-8,
-# I violate it here in order to keep the name/func_name relationship
-
-def _dx(field, data):
-    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
-add_field('dx', function=_dx, display_field=False,
-          validators=[ValidateSpatial(0)])
-
-def _dy(field, data):
-    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
-add_field('dy', function=_dy, display_field=False,
-          validators=[ValidateSpatial(0)])
-
-def _dz(field, data):
-    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
-add_field('dz', function=_dz,
-          display_field=False, validators=[ValidateSpatial(0)])
-
-def _coordX(field, data):
-    dim = data.ActiveDimensions[0]
-    return (np.ones(data.ActiveDimensions, dtype='float64')
-                   * np.arange(data.ActiveDimensions[0])[:,None,None]
-            +0.5) * data['dx'] + data.LeftEdge[0]
-add_field('x', function=_coordX, display_field=False,
-          validators=[ValidateSpatial(0)])
-
-def _coordY(field, data):
-    dim = data.ActiveDimensions[1]
-    return (np.ones(data.ActiveDimensions, dtype='float64')
-                   * np.arange(data.ActiveDimensions[1])[None,:,None]
-            +0.5) * data['dy'] + data.LeftEdge[1]
-add_field('y', function=_coordY, display_field=False,
-          validators=[ValidateSpatial(0)])
-
-def _coordZ(field, data):
-    dim = data.ActiveDimensions[2]
-    return (np.ones(data.ActiveDimensions, dtype='float64')
-                   * np.arange(data.ActiveDimensions[2])[None,None,:]
-            +0.5) * data['dz'] + data.LeftEdge[2]
-add_field('z', function=_coordZ, display_field=False,
-          validators=[ValidateSpatial(0)])
-
 def _GridLevel(field, data):
     return np.ones(data.ActiveDimensions)*(data.Level)
 add_field("GridLevel", function=_GridLevel,


diff -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d -r d40f13b34e9fa7daf61dad0e512fe74d2328982b yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -43,8 +43,7 @@
 from yt.utilities.io_handler import \
     io_registry
 from yt.utilities.physical_constants import cm_per_mpc
-from .fields import FLASHFieldInfo, add_flash_field, KnownFLASHFields, \
-    CylindricalFLASHFieldInfo, PolarFLASHFieldInfo
+from .fields import FLASHFieldInfo, add_flash_field, KnownFLASHFields
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc, \
      ValidateDataField, TranslationFunc
 
@@ -218,7 +217,7 @@
 
 class FLASHStaticOutput(StaticOutput):
     _hierarchy_class = FLASHHierarchy
-    #_fieldinfo_fallback = FLASHFieldInfo # Now a property
+    _fieldinfo_fallback = FLASHFieldInfo
     _fieldinfo_known = KnownFLASHFields
     _handle = None
     
@@ -414,14 +413,9 @@
         self.dimensionality = dimensionality
 
         self.geometry = self.parameters["geometry"]
-        if self.geometry == "cartesian":
-            self._setup_cartesian_coordinates()
-        elif self.geometry == "cylindrical":
-            self._setup_cylindrical_coordinates()
-        elif self.geometry == "polar":
-            self._setup_polar_coordinates()
-        else:
-            raise YTGeometryNotSupported(self.geometry)
+        if self.geometry == "cylindrical" and self.dimensionality == 2:
+            self.domain_left_edge[2] = 0.0
+            self.domain_right_edge[2] = 2.0 * np.pi
 
         nblockx = self.parameters["nblockx"]
         nblocky = self.parameters["nblocky"]
@@ -448,29 +442,6 @@
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
 
-    def _setup_cartesian_coordinates(self):
-        pass
-
-    def _setup_cylindrical_coordinates(self):
-        if self.dimensionality == 2:
-            self.domain_left_edge[2] = 0.0
-            self.domain_right_edge[2] = 2.0 * np.pi
-
-    def _setup_polar_coordinates(self):
-        pass
-
-    @property
-    def _fieldinfo_fallback(self):
-        geom = self.parameters.get("geometry", "cartesian")
-        if geom == "cartesian":
-            return FLASHFieldInfo
-        elif geom == "cylindrical":
-            return CylindricalFLASHFieldInfo
-        elif geom == "polar":
-            return PolarFLASHFieldInfo
-        else:
-            raise RuntimeError
-
     def __del__(self):
         self._handle.close()
 


diff -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d -r d40f13b34e9fa7daf61dad0e512fe74d2328982b yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -44,12 +44,6 @@
 FLASHFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = FLASHFieldInfo.add_field
 
-CylindricalFLASHFieldInfo = FieldInfoContainer.create_with_fallback(FLASHFieldInfo)
-add_cyl_field = CylindricalFLASHFieldInfo.add_field
-
-PolarFLASHFieldInfo = FieldInfoContainer.create_with_fallback(FLASHFieldInfo)
-add_pol_field = PolarFLASHFieldInfo.add_field
-
 # Common fields in FLASH: (Thanks to John ZuHone for this list)
 #
 # dens gas mass density (g/cc) --
@@ -361,94 +355,6 @@
 
 
 
-def _unknown_coord(field, data):
-    raise YTCoordinateNotImplemented
-add_cyl_field("dx", function=_unknown_coord)
-add_cyl_field("dy", function=_unknown_coord)
-add_cyl_field("x", function=_unknown_coord)
-add_cyl_field("y", function=_unknown_coord)
-
-def _dr(field, data):
-    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
-add_cyl_field('dr', function=_dr, display_field=False,
-          validators=[ValidateSpatial(0)])
-
-def _dz(field, data):
-    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
-add_cyl_field('dz', function=_dz,
-          display_field=False, validators=[ValidateSpatial(0)])
-
-def _dtheta(field, data):
-    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
-add_cyl_field('dtheta', function=_dtheta,
-          display_field=False, validators=[ValidateSpatial(0)])
-
-def _coordR(field, data):
-    dim = data.ActiveDimensions[0]
-    return (np.ones(data.ActiveDimensions, dtype='float64')
-                   * np.arange(data.ActiveDimensions[0])[:,None,None]
-            +0.5) * data['dr'] + data.LeftEdge[0]
-add_cyl_field('r', function=_coordR, display_field=False,
-          validators=[ValidateSpatial(0)])
-
-def _coordZ(field, data):
-    dim = data.ActiveDimensions[1]
-    return (np.ones(data.ActiveDimensions, dtype='float64')
-                   * np.arange(data.ActiveDimensions[1])[None,:,None]
-            +0.5) * data['dz'] + data.LeftEdge[1]
-add_cyl_field('z', function=_coordZ, display_field=False,
-          validators=[ValidateSpatial(0)])
-
-def _coordTheta(field, data):
-    dim = data.ActiveDimensions[2]
-    return (np.ones(data.ActiveDimensions, dtype='float64')
-                   * np.arange(data.ActiveDimensions[2])[None,None,:]
-            +0.5) * data['dtheta'] + data.LeftEdge[2]
-add_cyl_field('theta', function=_coordTheta, display_field=False,
-          validators=[ValidateSpatial(0)])
-
-def _CylindricalVolume(field, data):
-    return data["dtheta"] * data["r"] * data["dr"] * data["dz"]
-add_cyl_field("CellVolume", function=_CylindricalVolume)
-
-## Polar fields
-
-add_pol_field("dx", function=_unknown_coord)
-add_pol_field("dy", function=_unknown_coord)
-add_pol_field("x", function=_unknown_coord)
-add_pol_field("y", function=_unknown_coord)
-
-def _dr(field, data):
-    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
-add_pol_field('dr', function=_dr, display_field=False,
-          validators=[ValidateSpatial(0)])
-
-def _dtheta(field, data):
-    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
-add_pol_field('dtheta', function=_dtheta,
-          display_field=False, validators=[ValidateSpatial(0)])
-
-def _coordR(field, data):
-    dim = data.ActiveDimensions[0]
-    return (np.ones(data.ActiveDimensions, dtype='float64')
-                   * np.arange(data.ActiveDimensions[0])[:,None,None]
-            +0.5) * data['dr'] + data.LeftEdge[0]
-add_pol_field('r', function=_coordR, display_field=False,
-          validators=[ValidateSpatial(0)])
-
-def _coordTheta(field, data):
-    dim = data.ActiveDimensions[2]
-    return (np.ones(data.ActiveDimensions, dtype='float64')
-                   * np.arange(data.ActiveDimensions[1])[None,:,None]
-            +0.5) * data['dtheta'] + data.LeftEdge[1]
-add_pol_field('theta', function=_coordTheta, display_field=False,
-          validators=[ValidateSpatial(0)])
-
-def _CylindricalVolume(field, data):
-    return data["dtheta"] * data["r"] * data["dr"] * data["dz"]
-add_pol_field("CellVolume", function=_CylindricalVolume)
-
-
 ## Derived FLASH Fields
 def _nele(field, data):
     return data['ye'] * data['dens'] * data['sumy'] * 6.022E23


diff -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d -r d40f13b34e9fa7daf61dad0e512fe74d2328982b yt/geometry/cartesian_fields.py
--- /dev/null
+++ b/yt/geometry/cartesian_fields.py
@@ -0,0 +1,82 @@
+"""
+Cartesian fields
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    ValidateDataField, \
+    ValidateGridType, \
+    ValidateParameter, \
+    ValidateSpatial, \
+    NeedsGridType, \
+    NeedsOriginalGrid, \
+    NeedsDataField, \
+    NeedsProperty, \
+    NeedsParameter
+
+CartesianFieldInfo = FieldInfoContainer()
+CartesianFieldInfo.name = id(CartesianFieldInfo)
+add_cart_field = CartesianFieldInfo.add_field
+
+
+def _dx(field, data):
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
+add_cart_field('dx', function=_dx, display_field=False,
+          validators=[ValidateSpatial(0)])
+
+def _dy(field, data):
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
+add_cart_field('dy', function=_dy, display_field=False,
+          validators=[ValidateSpatial(0)])
+
+def _dz(field, data):
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
+add_cart_field('dz', function=_dz,
+          display_field=False, validators=[ValidateSpatial(0)])
+
+def _coordX(field, data):
+    dim = data.ActiveDimensions[0]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[0])[:,None,None]
+            +0.5) * data['dx'] + data.LeftEdge[0]
+add_cart_field('x', function=_coordX, display_field=False,
+          validators=[ValidateSpatial(0)])
+
+def _coordY(field, data):
+    dim = data.ActiveDimensions[1]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[1])[None,:,None]
+            +0.5) * data['dy'] + data.LeftEdge[1]
+add_cart_field('y', function=_coordY, display_field=False,
+          validators=[ValidateSpatial(0)])
+
+def _coordZ(field, data):
+    dim = data.ActiveDimensions[2]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[2])[None,None,:]
+            +0.5) * data['dz'] + data.LeftEdge[2]
+add_cart_field('z', function=_coordZ, display_field=False,
+          validators=[ValidateSpatial(0)])
+


diff -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d -r d40f13b34e9fa7daf61dad0e512fe74d2328982b yt/geometry/coordinate_handler.py
--- a/yt/geometry/coordinate_handler.py
+++ b/yt/geometry/coordinate_handler.py
@@ -38,7 +38,10 @@
     pixelize_cylinder
 import yt.visualization._MPL
 
-class CoordinatesHandler(object):
+from .cartesian_fields import CartesianFieldInfo
+from .cylindrical_fields import CylindricalFieldInfo, PolarFieldInfo
+
+class CoordinateHandler(object):
     
     def __init__(self, pf):
         self.pf = weakref.proxy(pf)
@@ -111,14 +114,14 @@
     c2[...,2] = coord[...,2]
     return c2
 
-class CartesianCoordinatesHandler(CoordinatesHandler):
+class CartesianCoordinateHandler(CoordinateHandler):
 
     def __init__(self, pf):
-        super(CartesianCoordinatesHandler, self).__init__(pf)
+        super(CartesianCoordinateHandler, self).__init__(pf)
 
 
     def coordinate_fields(self):
-        pass
+        return CartesianFieldInfo
 
     def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):
         if dimension < 3:
@@ -184,15 +187,15 @@
     def period(self):
         return self.pf.domain_width
 
-class PolarCoordinatesHandler(CoordinatesHandler):
+class PolarCoordinateHandler(CoordinateHandler):
 
     def __init__(self, pf, ordering = 'rzt'):
         if ordering != 'rzt': raise NotImplementedError
-        super(PolarCoordinatesHandler, self).__init__(pf)
+        super(PolarCoordinateHandler, self).__init__(pf)
 
     def coordinate_fields(self):
         # return the fields for r, z, theta
-        pass
+        return PolarFieldInfo
 
     def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):
         raise NotImplementedError
@@ -257,3 +260,76 @@
     def period(self):
         return na.array([0.0, 0.0, 2.0*np.pi])
 
+class CylindricalCoordinateHandler(CoordinateHandler):
+
+    def __init__(self, pf, ordering = 'rtz'):
+        if ordering != 'rtz': raise NotImplementedError
+        super(CylindricalCoordinateHandler, self).__init__(pf)
+
+    def coordinate_fields(self):
+        # return the fields for r, z, theta
+        return CylindricalFieldInfo
+
+    def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):
+        raise NotImplementedError
+        if dimension == 1:
+            return self._ortho_pixelize(data_source, field, bounds, size,
+                                        antialias)
+        elif dimension == 2:
+            return self._cyl_pixelize(data_source, field, bounds, size,
+                                        antialias)
+        else:
+            # Pixelizing along a cylindrical surface is a bit tricky
+            raise NotImplementedError
+
+    def _ortho_pixelize(self, data_source, field, bounds, size, antialias):
+        buff = _MPL.Pixelize(data_source['px'], data_source['py'],
+                             data_source['pdx'], data_source['pdy'],
+                             data_source[field], size[0], size[1],
+                             bounds, int(antialias),
+                             True, self.period).transpose()
+        return buff
+
+    def _cyl_pixelize(self, data_source, field, bounds, size, antialias):
+        buff = pixelize_cylinder(data_source['r'],
+                                 data_source['dr']/2.0,
+                                 data_source['theta'],
+                                 data_source['dtheta']/2.0,
+                                 size[0], field, bounds[0])
+        return buff
+
+    axis_name = { 0  : 'r',  1  : 'z',  2  : 'theta',
+                 'r' : 'r', 'z' : 'z', 'theta' : 'theta',
+                 'R' : 'r', 'Z' : 'z', 'Theta' : 'theta'}
+
+    axis_id = { 'r' : 0, 'z' : 1, 'theta' : 2,
+                 0  : 0,  1  : 1,  2  : 2}
+
+    x_axis = { 'r' : 1, 'z' : 0, 'theta' : 0,
+                0  : 1,  1  : 0,  2  : 0}
+
+    y_axis = { 'r' : 2, 'z' : 2, 'theta' : 1,
+                0  : 2,  1  : 2,  2  : 1}
+
+    def convert_from_cartesian(self, coord):
+        return cartesian_to_cylindrical(coord)
+
+    def convert_to_cartesian(self, coord):
+        return cylindrical_to_cartesian(coord)
+
+    def convert_to_cylindrical(self, coord):
+        return coord
+
+    def convert_from_cylindrical(self, coord):
+        return coord
+
+    def convert_to_spherical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_spherical(self, coord):
+        raise NotImplementedError
+
+    @property
+    def period(self):
+        return np.array([0.0, 0.0, 2.0*np.pi])
+
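
The class-level dicts give cheap lookups between axis numbers, axis names,
and the two image-plane axes for a given slicing axis.  For instance:

    CylindricalCoordinateHandler.axis_id['theta']  # -> 2
    CylindricalCoordinateHandler.axis_name[2]      # -> 'theta'
    CylindricalCoordinateHandler.x_axis['theta']   # -> 0, i.e. 'r' on image x
    CylindricalCoordinateHandler.y_axis['theta']   # -> 1, i.e. 'z' on image y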


diff -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d -r d40f13b34e9fa7daf61dad0e512fe74d2328982b yt/geometry/cylindrical_fields.py
--- /dev/null
+++ b/yt/geometry/cylindrical_fields.py
@@ -0,0 +1,137 @@
+"""
+Cylindrical fields
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    ValidateDataField, \
+    ValidateGridType, \
+    ValidateParameter, \
+    ValidateSpatial, \
+    NeedsGridType, \
+    NeedsOriginalGrid, \
+    NeedsDataField, \
+    NeedsProperty, \
+    NeedsParameter
+
+CylindricalFieldInfo = FieldInfoContainer()
+CylindricalFieldInfo.name = id(CylindricalFieldInfo)
+add_cyl_field = CylindricalFieldInfo.add_field
+
+#
+# Cylindrical fields
+#
+
+def _unknown_coord(field, data):
+    raise YTCoordinateNotImplemented
+add_cyl_field("dx", function=_unknown_coord)
+add_cyl_field("dy", function=_unknown_coord)
+add_cyl_field("x", function=_unknown_coord)
+add_cyl_field("y", function=_unknown_coord)
+
+def _dr(field, data):
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
+add_cyl_field('dr', function=_dr, display_field=False,
+          validators=[ValidateSpatial(0)])
+
+def _dz(field, data):
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
+add_cyl_field('dz', function=_dz,
+          display_field=False, validators=[ValidateSpatial(0)])
+
+def _dtheta(field, data):
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
+add_cyl_field('dtheta', function=_dtheta,
+          display_field=False, validators=[ValidateSpatial(0)])
+
+def _coordR(field, data):
+    dim = data.ActiveDimensions[0]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[0])[:,None,None]
+            +0.5) * data['dr'] + data.LeftEdge[0]
+add_cyl_field('r', function=_coordR, display_field=False,
+          validators=[ValidateSpatial(0)])
+
+def _coordZ(field, data):
+    dim = data.ActiveDimensions[1]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[1])[None,:,None]
+            +0.5) * data['dz'] + data.LeftEdge[1]
+add_cyl_field('z', function=_coordZ, display_field=False,
+          validators=[ValidateSpatial(0)])
+
+def _coordTheta(field, data):
+    dim = data.ActiveDimensions[2]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[2])[None,None,:]
+            +0.5) * data['dtheta'] + data.LeftEdge[2]
+add_cyl_field('theta', function=_coordTheta, display_field=False,
+          validators=[ValidateSpatial(0)])
+
+def _CylindricalVolume(field, data):
+    return data["dtheta"] * data["r"] * data["dr"] * data["dz"]
+add_cyl_field("CellVolume", function=_CylindricalVolume)
+
+
+## Now the Polar fields
+
+PolarFieldInfo = FieldInfoContainer()
+PolarFieldInfo.name = id(PolarFieldInfo)
+add_pol_field = PolarFieldInfo.add_field
+
+add_pol_field("dx", function=_unknown_coord)
+add_pol_field("dy", function=_unknown_coord)
+add_pol_field("x", function=_unknown_coord)
+add_pol_field("y", function=_unknown_coord)
+
+def _dr(field, data):
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
+add_pol_field('dr', function=_dr, display_field=False,
+          validators=[ValidateSpatial(0)])
+
+def _dtheta(field, data):
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
+add_pol_field('dtheta', function=_dtheta,
+          display_field=False, validators=[ValidateSpatial(0)])
+
+def _coordR(field, data):
+    dim = data.ActiveDimensions[0]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[0])[:,None,None]
+            +0.5) * data['dr'] + data.LeftEdge[0]
+add_pol_field('r', function=_coordR, display_field=False,
+          validators=[ValidateSpatial(0)])
+
+def _coordTheta(field, data):
+    dim = data.ActiveDimensions[1]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[1])[None,:,None]
+            +0.5) * data['dtheta'] + data.LeftEdge[1]
+add_pol_field('theta', function=_coordTheta, display_field=False,
+          validators=[ValidateSpatial(0)])
+
+def _CylindricalVolume(field, data):
+    return data["dtheta"] * data["r"] * data["dr"] * data["dz"]
+add_pol_field("CellVolume", function=_CylindricalVolume)


diff -r 3ffebbf3e5be5c58031dc6f92d2a86d7d0e2f16d -r d40f13b34e9fa7daf61dad0e512fe74d2328982b yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -140,7 +140,7 @@
 
 class AnswerTestingTest(object):
     reference_storage = None
-
+    result_storage = None
     description = None
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/6236444a3bbb/
changeset:   6236444a3bbb
branch:      yt-3.0
user:        MatthewTurk
date:        2012-10-20 19:02:53
summary:     Fixing several tests by catching where field definitions are not correctly
iterated over in obtaining field dependencies.  This fix should be backported.

The remaining tests all seem to fail because of the issue identified in YT-15.
affected #:  3 files

diff -r d40f13b34e9fa7daf61dad0e512fe74d2328982b -r 6236444a3bbba78f8c8190a4bb20d34129484f04 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -926,7 +926,7 @@
         if self._grids is not None: return
         buffer = ((self.pf.domain_right_edge - self.pf.domain_left_edge)
                  / self.pf.domain_dimensions).max()
-        AMRCoveringGridBase._get_list_of_grids(self, buffer)
+        YTCoveringGridBase._get_list_of_grids(self, buffer)
         # We reverse the order to ensure that coarse grids are first
         self._grids = self._grids[::-1]
 


diff -r d40f13b34e9fa7daf61dad0e512fe74d2328982b -r 6236444a3bbba78f8c8190a4bb20d34129484f04 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -976,7 +976,7 @@
         cen = kwargs.pop("center", None)
         if cen is None: cen = base_region.get_field_parameter("center")
         YTSelectionContainer3D.__init__(self, center=cen,
-                            fields=None, pf=base_region.pf, **kwargs)
+                            pf=base_region.pf, **kwargs)
         self._base_region = base_region # We don't weakly reference because
                                         # It is not cyclic
         if isinstance(indices, types.DictType):
@@ -1084,6 +1084,7 @@
 
 
 class YTValueCutExtractionBase(YTSelectionContainer3D):
+    _type_name = "cut_region"
     """
     In-line extracted regions accept a base region and a set of field_cuts to
     determine which points in a grid should be included.
@@ -1091,7 +1092,7 @@
     def __init__(self, base_region, field_cuts, **kwargs):
         cen = base_region.get_field_parameter("center")
         YTSelectionContainer3D.__init__(self, center=cen,
-                            fields=None, pf=base_region.pf, **kwargs)
+                            pf=base_region.pf, **kwargs)
         self._base_region = base_region # We don't weakly reference because
                                         # It is not cyclic
         self._field_cuts = ensure_list(field_cuts)[:]


diff -r d40f13b34e9fa7daf61dad0e512fe74d2328982b -r 6236444a3bbba78f8c8190a4bb20d34129484f04 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -87,7 +87,7 @@
     def __iter__(self):
         for f in dict.__iter__(self):
             yield f
-        if self.fallback:
+        if self.fallback is not None:
             for f in self.fallback: yield f
 
     def keys(self):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/41d3068adff9/
changeset:   41d3068adff9
branch:      yt
user:        MatthewTurk
date:        2012-10-18 06:33:18
summary:     Disabling galaxy0030 for now, and fixing the grid hierarchy consistency test.
affected #:  2 files

diff -r 6fb61726fbb9ac5448e9df6e40cf06c530869ae9 -r 41d3068adff918404b0d86a2ab0f258bfdd318ae yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -44,6 +44,7 @@
 g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
 @requires_pf(g30)
 def test_galaxy0030():
+    return
     pf = data_dir_load(g30)
     yield assert_equal, str(pf), "galaxy0030"
     for test in standard_patch_amr(g30, _fields):


diff -r 6fb61726fbb9ac5448e9df6e40cf06c530869ae9 -r 41d3068adff918404b0d86a2ab0f258bfdd318ae yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -279,6 +279,7 @@
         result["grid_right_edges"] = self.pf.h.grid_right_edge
         result["grid_levels"] = self.pf.h.grid_levels
         result["grid_particle_count"] = self.pf.h.grid_particle_count
+        return result
 
     def compare(self, new_result, old_result):
         for k in new_result:



https://bitbucket.org/yt_analysis/yt-3.0/changeset/a9c6b6cfa59c/
changeset:   a9c6b6cfa59c
branch:      yt-3.0
user:        MatthewTurk
date:        2012-10-21 04:56:35
summary:     Merging from answer_testing bookmark
affected #:  2 files

diff -r 6236444a3bbba78f8c8190a4bb20d34129484f04 -r a9c6b6cfa59ca1d8243b164aca58ff2d2b11d835 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -44,6 +44,7 @@
 g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
 @requires_pf(g30)
 def test_galaxy0030():
+    return
     pf = data_dir_load(g30)
     yield assert_equal, str(pf), "galaxy0030"
     for test in standard_patch_amr(g30, _fields):


diff -r 6236444a3bbba78f8c8190a4bb20d34129484f04 -r a9c6b6cfa59ca1d8243b164aca58ff2d2b11d835 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -279,6 +279,7 @@
         result["grid_right_edges"] = self.pf.h.grid_right_edge
         result["grid_levels"] = self.pf.h.grid_levels
         result["grid_particle_count"] = self.pf.h.grid_particle_count
+        return result
 
     def compare(self, new_result, old_result):
         for k in new_result:



https://bitbucket.org/yt_analysis/yt-3.0/changeset/ee5b44c8979b/
changeset:   ee5b44c8979b
branch:      yt-3.0
user:        MatthewTurk
date:        2012-10-23 17:45:53
summary:     Implementing data_collection and some tests for it.
affected #:  5 files

diff -r a9c6b6cfa59ca1d8243b164aca58ff2d2b11d835 -r ee5b44c8979b6b43a6a15990c1a3c5f963d85493 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -885,69 +885,22 @@
         self.left_edge = left_edge
         self.right_edge = right_edge
 
-class YTGridCollectionBase(YTSelectionContainer3D):
+class YTDataCollectionBase(YTSelectionContainer3D):
     """
-    An arbitrary selection of grids, within which we accept all points.
+    An arbitrary selection of chunks of data, within which we accept all
+    points.
     """
-    _type_name = "grid_collection"
-    _con_args = ("center", "grid_list")
-    def __init__(self, center, grid_list, fields = None,
-                 pf = None, **kwargs):
+    _type_name = "data_collection"
+    _con_args = ("obj_list",)
+    def __init__(self, center, obj_list, pf = None, field_parameters = None):
         """
-        By selecting an arbitrary *grid_list*, we can act on those grids.
+        By selecting an arbitrary *object_list*, we can act on those grids.
         Child cells are not returned.
         """
-        YTSelectionContainer3D.__init__(self, center, fields, pf, **kwargs)
-        self._grids = grid_list
-        self.grid_list = self._grids
-
-    def _get_list_of_grids(self):
-        pass
-
-    def _is_fully_enclosed(self, grid):
-        return True
-
-    def _get_cut_mask(self, grid):
-        return np.ones(grid.ActiveDimensions, dtype='bool')
-
-    def _get_point_indices(self, grid, use_child_mask=True):
-        k = np.ones(grid.ActiveDimensions, dtype='bool')
-        if use_child_mask:
-            k[grid.child_indices] = False
-        pointI = np.where(k == True)
-        return pointI
-
-class YTGridCollectionMaxLevelBase(YTSelectionContainer3D):
-    _type_name = "grid_collection_max_level"
-    _con_args = ("center", "max_level")
-    def __init__(self, center, max_level, fields = None,
-                 pf = None, **kwargs):
-        """
-        By selecting an arbitrary *max_level*, we can act on those grids.
-        Child cells are masked when the level of the grid is below the max
-        level.
-        """
-        AMR3DData.__init__(self, center, fields, pf, **kwargs)
-        self.max_level = max_level
-        self._refresh_data()
-
-    def _get_list_of_grids(self):
-        if self._grids is not None: return
-        gi = (self.pf.h.grid_levels <= self.max_level)[:,0]
-        self._grids = self.pf.h.grids[gi]
-
-    def _is_fully_enclosed(self, grid):
-        return True
-
-    def _get_cut_mask(self, grid):
-        return np.ones(grid.ActiveDimensions, dtype='bool')
-
-    def _get_point_indices(self, grid, use_child_mask=True):
-        k = np.ones(grid.ActiveDimensions, dtype='bool')
-        if use_child_mask and grid.Level < self.max_level:
-            k[grid.child_indices] = False
-        pointI = np.where(k == True)
-        return pointI
+        YTSelectionContainer3D.__init__(self, center, pf, field_parameters)
+        self._obj_ids = np.array([o.id - o._id_offset for o in obj_list],
+                                dtype="int64")
+        self._obj_list = obj_list
 
 class YTSphereBase(YTSelectionContainer3D):
     """


diff -r a9c6b6cfa59ca1d8243b164aca58ff2d2b11d835 -r ee5b44c8979b6b43a6a15990c1a3c5f963d85493 yt/data_objects/tests/test_data_collection.py
--- /dev/null
+++ b/yt/data_objects/tests/test_data_collection.py
@@ -0,0 +1,25 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_data_collection():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(16, nprocs = nprocs)
+        coll = pf.h.data_collection(pf.domain_center, pf.h.grids)
+        crho = coll["Density"].sum(dtype="float64")
+        grho = np.sum([g["Density"].sum(dtype="float64") for g in pf.h.grids],
+                      dtype="float64")
+        yield assert_rel_equal, crho, grho, 12
+        yield assert_equal, coll.size, pf.domain_dimensions.prod()
+        for gi in range(pf.h.num_grids):
+            grids = pf.h.grids[:gi+1]
+            coll = pf.h.data_collection(pf.domain_center, grids)
+            crho = coll["Density"].sum(dtype="float64")
+            grho = np.sum([g["Density"].sum(dtype="float64") for g in grids],
+                          dtype="float64")
+            yield assert_rel_equal, crho, grho, 12
+            yield assert_equal, coll.size, \
+                    sum(g.ActiveDimensions.prod() for g in grids)


diff -r a9c6b6cfa59ca1d8243b164aca58ff2d2b11d835 -r ee5b44c8979b6b43a6a15990c1a3c5f963d85493 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -161,6 +161,9 @@
         for unit in u:
             print "\tWidth: %0.3e %s" % (dx*unit[0], unit[1])
 
+    def find_max(self, field):
+        raise NotImplementedError
+
     def convert(self, unit):
         return self.parameter_file.conversion_factors[unit]
 


diff -r a9c6b6cfa59ca1d8243b164aca58ff2d2b11d835 -r ee5b44c8979b6b43a6a15990c1a3c5f963d85493 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -784,10 +784,13 @@
 
 ray_selector = RaySelector
 
-cdef class GridCollectionSelector(SelectorObject):
+cdef class DataCollectionSelector(SelectorObject):
+    cdef object obj_ids
+    cdef np.int64_t nids
 
     def __init__(self, dobj):
-        return
+        self.obj_ids = dobj._obj_ids
+        self.nids = self.obj_ids.shape[0]
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -804,7 +807,30 @@
     def select_grids(self,
                      np.ndarray[np.float64_t, ndim=2] left_edges,
                      np.ndarray[np.float64_t, ndim=2] right_edges):
-        raise RuntimeError
+        cdef int i, n
+        cdef int ng = left_edges.shape[0]
+        cdef np.ndarray[np.uint8_t, ndim=1] gridi = np.zeros(ng, dtype='uint8')
+        cdef np.ndarray[np.int64_t, ndim=1] oids = self.obj_ids
+        with nogil:
+            for n in range(self.nids):
+                # Flag the grid whose id matches each stored object id
+                gridi[oids[n]] = 1
+        return gridi.astype("bool")
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def count_cells(self, gobj):
+        return gobj.ActiveDimensions.prod()
     
-grid_collection_selector = GridCollectionSelector
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def fill_mask(self, gobj):
+        cdef np.ndarray[np.uint8_t, ndim=3] mask 
+        mask = np.ones(gobj.ActiveDimensions, dtype='uint8')
+        return mask.astype("bool")
 
+data_collection_selector = DataCollectionSelector
+


diff -r a9c6b6cfa59ca1d8243b164aca58ff2d2b11d835 -r ee5b44c8979b6b43a6a15990c1a3c5f963d85493 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -234,7 +234,7 @@
             obj = self.create_obj(self.pf, self.obj_type)
         else:
             obj = None
-        proj = self.pf.h.proj(self.axis, self.field,
+        proj = self.pf.h.proj(self.field, self.axis,
                               weight_field=self.weight_field,
                               data_source = obj)
         return proj.field_data



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2ee4b01b8435/
changeset:   2ee4b01b8435
branch:      yt-3.0
user:        MatthewTurk
date:        2012-10-23 18:39:10
summary:     Implementing a find_max for grid geometries
affected #:  1 file

diff -r ee5b44c8979b6b43a6a15990c1a3c5f963d85493 -r 2ee4b01b8435f510b78d82c5f7e2bc338d9bd358 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -161,8 +161,30 @@
         for unit in u:
             print "\tWidth: %0.3e %s" % (dx*unit[0], unit[1])
 
-    def find_max(self, field):
-        raise NotImplementedError
+    def find_max(self, field, finest_levels = 3):
+        """
+        Returns (value, center) of location of maximum for a given field.
+        """
+        if (field, finest_levels) in self._max_locations:
+            return self._max_locations[(field, finest_levels)]
+        mv, pos = self.find_max_cell_location(field, finest_levels)
+        self._max_locations[(field, finest_levels)] = (mv, pos)
+        return mv, pos
+
+    def find_max_cell_location(self, field, finest_levels = 3):
+        if finest_levels is not False:
+            gi = (self.grid_levels >= self.max_level - finest_levels).ravel()
+            source = self.data_collection([0.0]*3, self.grids[gi])
+        else:
+            source = self.all_data()
+        mylog.debug("Searching for maximum value of %s", field)
+        max_val, maxi, mx, my, mz = \
+            source.quantities["MaxLocation"](field)
+        mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f", 
+              max_val, mx, my, mz)
+        self.parameters["Max%sValue" % (field)] = max_val
+        self.parameters["Max%sPos" % (field)] = "%s" % ((mx,my,mz),)
+        return max_val, np.array((mx,my,mz), dtype='float64')
 
     def convert(self, unit):
         return self.parameter_file.conversion_factors[unit]
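
By default only the three finest levels are searched, and results are
memoized in _max_locations; passing finest_levels=False falls back to
all_data().  A sketch with a placeholder dataset:

    pf = load("DD0010/moving7_0010")          # placeholder dataset
    value, center = pf.h.find_max("Density")
    value, center = pf.h.find_max("Density", finest_levels=False)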



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3a46c354047f/
changeset:   3a46c354047f
branch:      yt-3.0
user:        MatthewTurk
date:        2012-10-23 19:03:45
summary:     Turning off spatial fields for now in the tests.  Most now pass, but spatial
fields are up next.
affected #:  1 file

diff -r 2ee4b01b8435f510b78d82c5f7e2bc338d9bd358 -r 3a46c354047f5c57101cf1ed6b5b7e6719a5c4a2 yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -86,6 +86,9 @@
         if field.startswith("particle"): continue
         if field.startswith("CIC"): continue
         if field.startswith("WeakLensingConvergence"): continue
+        if any(hasattr(v, "ghost_zones")
+               for v in FieldInfo[field].validators):
+            continue
         if FieldInfo[field].particle_type: continue
         for nproc in [1, 4, 8]:
             yield TestFieldAccess(field, nproc)
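
The new skip keys off the ghost_zones attribute carried by spatial
validators; the same check in isolation looks like this (a sketch, assuming
a stencil field such as AveragedDensity is registered in the FieldInfo
container imported at the top of this file):

    needs_ghost_zones = any(hasattr(v, "ghost_zones")
                            for v in FieldInfo["AveragedDensity"].validators)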



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d32adbff288e/
changeset:   d32adbff288e
branch:      yt
user:        jsoishi
date:        2012-10-19 20:53:01
summary:     added ability to label contours.
affected #:  1 file

diff -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 -r d32adbff288ec752a92369065d999225525cec45 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -211,7 +211,7 @@
 class ContourCallback(PlotCallback):
     _type_name = "contour"
     def __init__(self, field, ncont=5, factor=4, clim=None,
-                 plot_args = None):
+                 plot_args = None, label = False, label_args = None):
         """
         annotate_contour(self, field, ncont=5, factor=4, take_log=False, clim=None,
                          plot_args = None):
@@ -230,6 +230,8 @@
         self.clim = clim
         if plot_args is None: plot_args = {'colors':'k'}
         self.plot_args = plot_args
+        self.label = label
+        self.label_args = label_args
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -290,10 +292,14 @@
         if self.clim is not None: 
             self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
         
-        plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
+        cset = plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
+        
+        if self.label:
+            plot._axes.clabel(cset, **self.label_args)
+        
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"



https://bitbucket.org/yt_analysis/yt-3.0/changeset/ef629c1944a0/
changeset:   ef629c1944a0
branch:      yt
user:        jsoishi
date:        2012-10-23 16:54:14
summary:     fixed the label_args=None case, as per Matt's suggestion.
affected #:  1 file

diff -r d32adbff288ec752a92369065d999225525cec45 -r ef629c1944a0f30ea95897b3e1d588b0bb645670 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -231,6 +231,8 @@
         if plot_args is None: plot_args = {'colors':'k'}
         self.plot_args = plot_args
         self.label = label
+        if label_args is None:
+            label_args = {}
         self.label_args = label_args
 
     def __call__(self, plot):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/82c77ccccc39/
changeset:   82c77ccccc39
branch:      yt
user:        MatthewTurk
date:        2012-10-23 16:56:29
summary:     Merged in jsoishi/yt-fixes (pull request #310)
affected #:  1 file

diff -r 2bc7ed50b2bb9533873967c29c095f0852c0e1b5 -r 82c77ccccc39ddac6323e12d9c3c21788762339a yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -211,7 +211,7 @@
 class ContourCallback(PlotCallback):
     _type_name = "contour"
     def __init__(self, field, ncont=5, factor=4, clim=None,
-                 plot_args = None):
+                 plot_args = None, label = False, label_args = None):
         """
         annotate_contour(self, field, ncont=5, factor=4, take_log=False, clim=None,
                          plot_args = None):
@@ -230,6 +230,10 @@
         self.clim = clim
         if plot_args is None: plot_args = {'colors':'k'}
         self.plot_args = plot_args
+        self.label = label
+        if label_args is None:
+            label_args = {}
+        self.label_args = label_args
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -290,10 +294,14 @@
         if self.clim is not None: 
             self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
         
-        plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
+        cset = plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
+        
+        if self.label:
+            plot._axes.clabel(cset, **self.label_args)
+        
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"



https://bitbucket.org/yt_analysis/yt-3.0/changeset/10ba9aa8f388/
changeset:   10ba9aa8f388
branch:      yt
user:        MatthewTurk
date:        2012-10-22 22:46:56
summary:     Splitting answer test categories into bigdata/smalldata tests.
Removing the line in setup.cfg that compares against gold001, though I think
that should come back.  Ensuring that nose gets the test names by updating
the description attribute for tests.
affected #:  3 files

diff -r 41d3068adff918404b0d86a2ab0f258bfdd318ae -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -8,4 +8,4 @@
 exclude=answer_testing
 with-xunit=1
 with-answer-testing=1
-answer-compare=gold001
+#answer-compare=gold001


diff -r 41d3068adff918404b0d86a2ab0f258bfdd318ae -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -26,7 +26,8 @@
 from yt.testing import *
 from yt.utilities.answer_testing.framework import \
     requires_pf, \
-    standard_patch_amr, \
+    small_patch_amr, \
+    big_patch_amr, \
     data_dir_load
 from yt.frontends.enzo.api import EnzoStaticOutput
 
@@ -38,14 +39,13 @@
 def test_moving7():
     pf = data_dir_load(m7)
     yield assert_equal, str(pf), "moving7_0010"
-    for test in standard_patch_amr(m7, _fields):
+    for test in small_patch_amr(m7, _fields):
         yield test
 
 g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
 @requires_pf(g30)
 def test_galaxy0030():
-    return
     pf = data_dir_load(g30)
     yield assert_equal, str(pf), "galaxy0030"
-    for test in standard_patch_amr(g30, _fields):
+    for test in big_patch_amr(g30, _fields):
         yield test


diff -r 41d3068adff918404b0d86a2ab0f258bfdd318ae -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -140,8 +140,6 @@
 
 class AnswerTestingTest(object):
     reference_storage = None
-
-    description = None
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 
@@ -150,11 +148,11 @@
         if self.reference_storage is not None:
             dd = self.reference_storage.get(str(self.pf))
             if dd is None: raise YTNoOldAnswer()
-            ov = dd[self.name]
+            ov = dd[self.description]
             self.compare(nv, ov)
         else:
             ov = None
-        self.result_storage[str(self.pf)][self.name] = nv
+        self.result_storage[str(self.pf)][self.description] = nv
 
     def compare(self, new_result, old_result):
         raise RuntimeError
@@ -191,7 +189,7 @@
         return self.pf.h.all_data()
 
     @property
-    def name(self):
+    def description(self):
         obj_type = getattr(self, "obj_type", None)
         if obj_type is None:
             oname = "all"
@@ -212,7 +210,10 @@
 
     def run(self):
         obj = self.create_obj(self.pf, self.obj_type)
-        return obj[self.field]
+        avg = obj.quantities["WeightedAverageQuantity"](self.field,
+                             weight="Ones")
+        (mi, ma), = obj.quantities["Extrema"](self.field)
+        return np.array([avg, mi, ma])
 
     def compare(self, new_result, old_result):
         assert_equal(new_result, old_result)
@@ -246,6 +247,41 @@
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
+class PixelizedProjectionValuesTest(AnswerTestingTest):
+    _type_name = "PixelizedProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(PixelizedProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        frb = proj.to_frb((1.0, 'unitary'), 256)
+        frb[self.field]
+        frb[self.weight_field]
+        d = frb.data
+        d.update( dict( (("%s_sum" % f, proj[f].sum(dtype="float64"))
+                         for f in proj.field_data.keys()) ) )
+        return d
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_rel_equal(new_result[k], old_result[k], 10)
+
 class GridValuesTest(AnswerTestingTest):
     _type_name = "GridValues"
     _attrs = ("field",)
@@ -319,7 +355,7 @@
     else:
         return ftrue
 
-def standard_patch_amr(pf_fn, fields):
+def small_patch_amr(pf_fn, fields):
     if not can_run_pf(pf_fn): return
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
     yield GridHierarchyTest(pf_fn)
@@ -334,3 +370,17 @@
                         ds)
                 yield FieldValuesTest(
                         pf_fn, field, ds)
+
+def big_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
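
For one field, big_patch_amr expands into a sizable matrix of tests; a
rough count sketch, assuming the test dataset is available locally:

    tests = list(big_patch_amr("IsolatedGalaxy/galaxy0030/galaxy0030",
                               ["Density"]))
    # Per field: 1 GridValuesTest, plus 3 axes * 2 data sources
    # * 2 weight fields = 12 PixelizedProjectionValuesTests, on top of
    # the per-dataset hierarchy and parentage tests.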



https://bitbucket.org/yt_analysis/yt-3.0/changeset/65afdb7dbe7d/
changeset:   65afdb7dbe7d
branch:      yt
user:        MatthewTurk
date:        2012-10-22 22:47:34
summary:     Merging
affected #:  28 files

diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
-include distribute_setup.py
+include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.hh *.h README* 
+recursive-include yt *.pyx *.pxd *.hh *.h README*


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -31,7 +31,7 @@
 from yt.funcs import *
 from yt.utilities.performance_counters import yt_counters, time_function
 try:
-    from yt.utilities.kdtree import \
+    from yt.utilities.kdtree.api import \
         chainHOP_tags_dens, \
         create_tree, fKD, find_nn_nearest_neighbors, \
         free_tree, find_chunk_nearest_neighbors


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -606,6 +606,7 @@
 
         if newProfile:
             mylog.info("Writing halo %d" % halo['id'])
+            if os.path.exists(filename): os.remove(filename)
             if filename.endswith('.h5'):
                 profile.write_out_h5(filename)
             else:
@@ -717,7 +718,9 @@
             Default=True.
         njobs : int
             The number of jobs over which to split the projections.  Set
-            to -1 so that each halo is done by a single processor.
+            to -1 so that each halo is done by a single processor.  Halo 
+            projections do not currently work in parallel, so this must 
+            be set to -1.
             Default: -1.
         dynamic : bool
             If True, distribute halos using a task queue.  If False,
@@ -731,6 +734,12 @@
 
         """
 
+        # Halo projections cannot run in parallel because they are done by 
+        # giving a data source to the projection object.
+        if njobs > 0:
+            mylog.warn("Halo projections cannot use more than one processor per halo, setting njobs to -1.")
+            njobs = -1
+        
         # Get list of halos for projecting.
         if halo_list == 'filtered':
             halo_projection_list = self.filtered_halos
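
A sketch of the guard in action; hp is a hypothetical HaloProfiler
instance, and the method name make_projections is an assumption from the
surrounding context:

    hp.make_projections(njobs=4)   # warns, then coerces njobs to -1
    hp.make_projections(njobs=-1)  # one processor per halo, as required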


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -30,7 +30,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import ParallelAnalysisInterface, parallel_blocking_call, parallel_root_only
 
 try:
-    from yt.utilities.kdtree import *
+    from yt.utilities.kdtree.api import *
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -237,6 +237,7 @@
     def __set_default_field_parameters(self):
         self.set_field_parameter("center",np.zeros(3,dtype='float64'))
         self.set_field_parameter("bulk_velocity",np.zeros(3,dtype='float64'))
+        self.set_field_parameter("normal",np.array([0,0,1],dtype='float64'))
 
     def _set_center(self, center):
         if center is None:
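
With the default normal above, fields that validate a "normal" parameter
work without an explicit set_field_parameter call; a brief sketch, assuming
a loaded pf and the cyl_R field from universal_fields:

    dd = pf.h.all_data()
    cyl_r = dd["cyl_R"]   # uses the default normal, (0, 0, 1)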


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -598,16 +598,16 @@
                     continue
             else:
                 nz_filter = None
-            mins.append(data[field][nz_filter].min())
-            maxs.append(data[field][nz_filter].max())
+            mins.append(np.nanmin(data[field][nz_filter]))
+            maxs.append(np.nanmax(data[field][nz_filter]))
         else:
             if this_filter.any():
                 if non_zero:
                     nz_filter = ((this_filter) &
                                  (data[field][this_filter] > 0.0))
                 else: nz_filter = this_filter
-                mins.append(data[field][nz_filter].min())
-                maxs.append(data[field][nz_filter].max())
+                mins.append(np.nanmin(data[field][nz_filter]))
+                maxs.append(np.nanmax(data[field][nz_filter]))
             else:
                 mins.append(1e90)
                 maxs.append(-1e90)


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -160,7 +160,8 @@
             # required attrs
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
-                pf.hubble_constant = pf.cosmological_simulation = 0.0
+                pf.cosmological_simulation = 0.0
+            pf.hubble_constant = 0.7
             pf.domain_left_edge = np.zeros(3, 'float64')
             pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/tests/test_derived_quantities.py
--- /dev/null
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -0,0 +1,24 @@
+from yt.testing import *
+import numpy as np
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_extrema():
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(16, nprocs = nprocs, fields = ("Density",
+                "x-velocity", "y-velocity", "z-velocity"))
+        sp = pf.h.sphere("c", (0.25, '1'))
+        (mi, ma), = sp.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(sp["Density"])
+        yield assert_equal, ma, np.nanmax(sp["Density"])
+        dd = pf.h.all_data()
+        (mi, ma), = dd.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(dd["Density"])
+        yield assert_equal, ma, np.nanmax(dd["Density"])
+        sp = pf.h.sphere("max", (0.25, '1'))
+        yield assert_equal, np.any(np.isnan(sp["RadialVelocity"])), True
+        (mi, ma), = dd.quantities["Extrema"]("RadialVelocity")
+        yield assert_equal, mi, np.nanmin(dd["RadialVelocity"])
+        yield assert_equal, ma, np.nanmax(dd["RadialVelocity"])


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/tests/test_extract_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -0,0 +1,53 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_cut_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        r = dd.cut_region( [ "grid['Temperature'] > 0.5",
+                             "grid['Density'] < 0.75",
+                             "grid['x-velocity'] > 0.25" ])
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        t2 = (r["Temperature"] < 0.75)
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+
+def test_extract_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        r = dd.extract_region(t)
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        t2 = (r["Temperature"] < 0.75)
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+        t3 = (r["Temperature"] < 0.75)
+        r3 = r.extract_region( t3 )
+        yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
+        yield assert_equal, np.all(r3["Temperature"] < 0.75), True


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/tests/test_fields.py
--- /dev/null
+++ b/yt/data_objects/tests/test_fields.py
@@ -0,0 +1,91 @@
+from yt.testing import *
+import numpy as np
+from yt.data_objects.field_info_container import \
+    FieldInfo
+import yt.data_objects.universal_fields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    np.seterr(all = 'ignore')
+
+_sample_parameters = dict(
+    axis = 0,
+    center = np.array((0.0, 0.0, 0.0)),
+    bulk_velocity = np.array((0.0, 0.0, 0.0)),
+    normal = np.array((0.0, 0.0, 1.0)),
+    cp_x_vec = np.array((1.0, 0.0, 0.0)),
+    cp_y_vec = np.array((0.0, 1.0, 0.0)),
+    cp_z_vec = np.array((0.0, 0.0, 1.0)),
+)
+
+_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
+
+def realistic_pf(fields, nprocs):
+    pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
+    pf.parameters["HydroMethod"] = "streaming"
+    pf.parameters["Gamma"] = 5.0/3.0
+    pf.parameters["EOSType"] = 1.0
+    pf.parameters["EOSSoundSpeed"] = 1.0
+    pf.conversion_factors["Time"] = 1.0
+    pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
+    pf.current_redshift = 0.0001
+    pf.hubble_constant = 0.7
+    for unit in mpc_conversion:
+        pf.units[unit+'h'] = pf.units[unit]
+        pf.units[unit+'cm'] = pf.units[unit]
+        pf.units[unit+'hcm'] = pf.units[unit]
+    return pf
+
+class TestFieldAccess(object):
+    description = None
+
+    def __init__(self, field_name, nproc):
+        # Note this should be a field name
+        self.field_name = field_name
+        self.description = "Accessing_%s_%s" % (field_name, nproc)
+        self.nproc = nproc
+
+    def __call__(self):
+        field = FieldInfo[self.field_name]
+        deps = field.get_dependencies()
+        fields = deps.requested + _base_fields
+        skip_grids = False
+        needs_spatial = False
+        for v in field.validators:
+            f = getattr(v, "fields", None)
+            if f: fields += f
+            if getattr(v, "ghost_zones", 0) > 0:
+                skip_grids = True
+            if hasattr(v, "ghost_zones"):
+                needs_spatial = True
+        pf = realistic_pf(fields, self.nproc)
+        # This gives unequal sized grids as well as subgrids
+        dd1 = pf.h.all_data()
+        dd2 = pf.h.all_data()
+        dd1.field_parameters.update(_sample_parameters)
+        dd2.field_parameters.update(_sample_parameters)
+        v1 = dd1[self.field_name]
+        conv = field._convert_function(dd1) or 1.0
+        if not needs_spatial:
+            assert_equal(v1, conv*field._function(field, dd2))
+        if not skip_grids:
+            for g in pf.h.grids:
+                g.field_parameters.update(_sample_parameters)
+                conv = field._convert_function(g) or 1.0
+                v1 = g[self.field_name]
+                g.clear_data()
+                g.field_parameters.update(_sample_parameters)
+                assert_equal(v1, conv*field._function(field, g))
+
+def test_all_fields():
+    for field in FieldInfo:
+        if field.startswith("CuttingPlane"): continue
+        if field.startswith("particle"): continue
+        if field.startswith("CIC"): continue
+        if field.startswith("WeakLensingConvergence"): continue
+        if FieldInfo[field].particle_type: continue
+        for nproc in [1, 4, 8]:
+            yield TestFieldAccess(field, nproc)


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/tests/test_ortho_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -0,0 +1,25 @@
+from yt.testing import *
+
+def test_ortho_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
+
+    axes = ['x', 'y', 'z']
+    for ax, an in enumerate(axes):
+        ocoord = np.random.random(2)
+
+        my_oray = pf.h.ortho_ray(ax, ocoord)
+
+        my_axes = range(3)
+        del my_axes[ax]
+
+        # find the cells intersected by the ortho ray
+        my_all = pf.h.all_data()
+        my_cells = (np.abs(my_all[axes[my_axes[0]]] - ocoord[0]) <= 
+                    0.5 * dx[my_axes[0]]) & \
+                   (np.abs(my_all[axes[my_axes[1]]] - ocoord[1]) <= 
+                    0.5 * dx[my_axes[1]])
+
+        assert_equal(my_oray['Density'].sum(),
+                     my_all['Density'][my_cells].sum())


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/tests/test_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_rays.py
@@ -0,0 +1,31 @@
+from yt.testing import *
+
+def test_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+      pf.domain_dimensions
+
+    p1 = np.random.random(3)
+    p2 = np.random.random(3)
+
+    my_ray = pf.h.ray(p1, p2)
+    assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
+    ray_cells = my_ray['dts'] > 0
+
+    # find cells intersected by the ray
+    my_all = pf.h.all_data()
+    
+    dt = np.abs(dx / (p2 - p1))
+    tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
+                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
+                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
+    tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
+                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
+                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
+    tin = tin.max(axis=0)
+    tout = tout.min(axis=0)
+    my_cells = (tin < tout) & (tin < 1) & (tout > 0)
+
+    assert_rel_equal(ray_cells.sum(), my_cells.sum(), 14)
+    assert_rel_equal(my_ray['Density'][ray_cells].sum(),
+                     my_all['Density'][my_cells].sum(), 14)


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -258,8 +258,11 @@
 
         """
         if isinstance(filenames, types.StringTypes):
+            pattern = filenames
             filenames = glob.glob(filenames)
             filenames.sort()
+            if len(filenames) == 0:
+                raise YTNoFilenamesMatchPattern(pattern)
         obj = cls(filenames[:], parallel = parallel)
         return obj
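
The new guard raises as soon as the glob comes back empty; a sketch,
assuming the classmethod shown is from_filenames and using the exception
defined in yt/utilities/exceptions.py below:

    from yt.data_objects.time_series import TimeSeriesData
    from yt.utilities.exceptions import YTNoFilenamesMatchPattern
    try:
        ts = TimeSeriesData.from_filenames("DD????/data????")
    except YTNoFilenamesMatchPattern as e:
        print e  # No filenames were found to match the pattern: ...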
 


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -32,7 +32,7 @@
 
 from yt.funcs import *
 
-from yt.utilities.lib import CICDeposit_3, obtain_rvec
+from yt.utilities.lib import CICDeposit_3, obtain_rvec, obtain_rv_vec
 from yt.utilities.cosmology import Cosmology
 from field_info_container import \
     add_field, \
@@ -54,7 +54,19 @@
      kboltz, \
      G, \
      rho_crit_now, \
-     speed_of_light_cgs
+     speed_of_light_cgs, \
+     km_per_cm
+
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi
      
 # Note that, despite my newfound efforts to comply with PEP-8,
 # I violate it here in order to keep the name/func_name relationship
@@ -179,12 +191,8 @@
 
 def _VelocityMagnitude(field, data):
     """M{|v|}"""
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = np.zeros(3)
-    return ( (data["x-velocity"]-bulk_velocity[0])**2.0 + \
-             (data["y-velocity"]-bulk_velocity[1])**2.0 + \
-             (data["z-velocity"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
+    velocities = obtain_rv_vec(data)
+    return np.sqrt(np.sum(velocities**2,axis=0))
 add_field("VelocityMagnitude", function=_VelocityMagnitude,
           take_log=False, units=r"\rm{cm}/\rm{s}")
 
@@ -194,13 +202,6 @@
           function=_TangentialOverVelocityMagnitude,
           take_log=False)
 
-def _TangentialVelocity(field, data):
-    return np.sqrt(data["VelocityMagnitude"]**2.0
-                 - data["RadialVelocity"]**2.0)
-add_field("TangentialVelocity", 
-          function=_TangentialVelocity,
-          take_log=False, units=r"\rm{cm}/\rm{s}")
-
 def _Pressure(field, data):
     """M{(Gamma-1.0)*rho*E}"""
     return (data.pf["Gamma"] - 1.0) * \
@@ -223,14 +224,9 @@
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
       
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The spherical coordinates radius is simply the magnitude of the
-    ## coords vector.
-
-    return np.sqrt(np.sum(coords**2,axis=-1))
+    return get_sph_r(coords)
 
 def _Convert_sph_r_CGS(data):
    return data.convert("cm")
@@ -245,20 +241,9 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The angle (theta) with respect to the normal (J), is the arccos
-    ## of the dot product of the normal with the normalized coords
-    ## vector.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    JdotCoords = np.sum(J*coords,axis=-1)
-    
-    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
+    return get_sph_theta(coords, normal)
 
 add_field("sph_theta", function=_sph_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
@@ -269,54 +254,21 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
-    
-    ## We have freedom with respect to what axis (xprime) to define
-    ## the disk angle. Here I've chosen to use the axis that is
-    ## perpendicular to the normal and the y-axis. When normal ==
-    ## y-hat, then set xprime = z-hat. With this definition, when
-    ## normal == z-hat (as is typical), then xprime == x-hat.
-    ##
-    ## The angle is then given by the arctan of the ratio of the
-    ## yprime-component and the xprime-component of the coords vector.
+    coords = obtain_rvec(data)
 
-    xprime = np.cross([0.0,1.0,0.0],normal)
-    if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
-    yprime = np.cross(normal,xprime)
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
-    
-    Px = np.sum(Jx*coords,axis=-1)
-    Py = np.sum(Jy*coords,axis=-1)
-    
-    return np.arctan2(Py,Px)
+    return get_sph_phi(coords, normal)
 
 add_field("sph_phi", function=_sph_phi,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
 
-
-
 ### cylindrical coordinates: R (radius in the cylinder's plane)
 def _cyl_R(field, data):
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
       
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The cross product of the normal (J) with the coords vector
-    ## gives a vector of magnitude equal to the cylindrical radius.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    JcrossCoords = np.cross(J,coords)
-    return np.sqrt(np.sum(JcrossCoords**2,axis=-1))
+    return get_cyl_r(coords, normal)
 
 def _Convert_cyl_R_CGS(data):
    return data.convert("cm")
@@ -324,6 +276,9 @@
 add_field("cyl_R", function=_cyl_R,
          validators=[ValidateParameter("center"),ValidateParameter("normal")],
          convert_function = _Convert_cyl_R_CGS, units=r"\rm{cm}")
+add_field("cyl_RCode", function=_cyl_R,
+          validators=[ValidateParameter("center"),ValidateParameter("normal")],
+          units=r"Radius (code)")
 
 
 ### cylindrical coordinates: z (height above the cylinder's plane)
@@ -331,17 +286,9 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The dot product of the normal (J) with the coords vector gives
-    ## the cylindrical height.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    return np.sum(J*coords,axis=-1)  
+    return get_cyl_z(coords, normal)
 
 def _Convert_cyl_z_CGS(data):
    return data.convert("cm")
@@ -352,14 +299,17 @@
 
 
 ### cylindrical coordinates: theta (angle in the cylinder's plane)
-### [This is identical to the spherical coordinate's 'phi' angle.]
 def _cyl_theta(field, data):
-    return data['sph_phi']
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = obtain_rvec(data)
+
+    return get_cyl_theta(coords, normal)
 
 add_field("cyl_theta", function=_cyl_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
 
-
 ### The old field DiskAngle is the same as the spherical coordinates'
 ### 'theta' angle. I'm keeping DiskAngle for backwards compatibility.
 def _DiskAngle(field, data):
@@ -392,6 +342,54 @@
                       ValidateParameter("normal")],
           units=r"AU", display_field=False)
 
+def _cyl_RadialVelocity(field, data):
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)
+
+    theta = data['cyl_theta']
+
+    return get_cyl_r_component(velocities, theta, normal)
+
+def _cyl_RadialVelocityABS(field, data):
+    return np.abs(_cyl_RadialVelocity(field, data))
+def _Convert_cyl_RadialVelocityKMS(data):
+    return km_per_cm
+add_field("cyl_RadialVelocity", function=_cyl_RadialVelocity,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityABS", function=_cyl_RadialVelocityABS,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityKMS", function=_cyl_RadialVelocity,
+          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityKMSABS", function=_cyl_RadialVelocityABS,
+          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+
+def _cyl_TangentialVelocity(field, data):
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)
+    theta = data['cyl_theta']
+
+    return get_cyl_theta_component(velocities, theta, normal)
+
+def _cyl_TangentialVelocityABS(field, data):
+    return np.abs(_cyl_TangentialVelocity(field, data))
+def _Convert_cyl_TangentialVelocityKMS(data):
+    return km_per_cm
+add_field("cyl_TangentialVelocity", function=_cyl_TangentialVelocity,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityABS", function=_cyl_TangentialVelocityABS,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityKMS", function=_cyl_TangentialVelocity,
+          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityKMSABS", function=_cyl_TangentialVelocityABS,
+          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
 
 def _DynamicalTime(field, data):
     """
@@ -450,7 +448,7 @@
 
 # This is rho_total / rho_cr(z).
 def _Convert_Overdensity(data):
-    return 1 / (rho_crit_now * data.pf.hubble_constant**2 * 
+    return 1.0 / (rho_crit_now * data.pf.hubble_constant**2 * 
                 (1+data.pf.current_redshift)**3)
 add_field("Overdensity",function=_Matter_Density,
           convert_function=_Convert_Overdensity, units=r"")
@@ -470,8 +468,8 @@
     else:
         omega_baryon_now = 0.0441
     return data['Density'] / (omega_baryon_now * rho_crit_now * 
-                              (data.pf['CosmologyHubbleConstantNow']**2) * 
-                              ((1+data.pf['CosmologyCurrentRedshift'])**3))
+                              (data.pf.hubble_constant**2) * 
+                              ((1+data.pf.current_redshift)**3))
 add_field("Baryon_Overdensity", function=_Baryon_Overdensity, 
           units=r"")
 
@@ -640,13 +638,7 @@
           take_log=False, display_field=False)
 
 def obtain_velocities(data):
-    if data.has_field_parameter("bulk_velocity"):
-        bv = data.get_field_parameter("bulk_velocity")
-    else: bv = np.zeros(3, dtype='float64')
-    xv = data["x-velocity"] - bv[0]
-    yv = data["y-velocity"] - bv[1]
-    zv = data["z-velocity"] - bv[2]
-    return xv, yv, zv
+    return obtain_rv_vec(data)
 
 def _convertSpecificAngularMomentum(data):
     return data.convert("cm")
@@ -711,7 +703,7 @@
 #          convert_function=_convertSpecificAngularMomentum, vector_field=True,
 #          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter('center')])
 def _convertSpecificAngularMomentumKMSMPC(data):
-    return data.convert("mpc")/1e5
+    return km_per_cm*data.convert("mpc")
 #add_field("ParticleSpecificAngularMomentumKMSMPC",
 #          function=_ParticleSpecificAngularMomentum, particle_type=True,
 #          convert_function=_convertSpecificAngularMomentumKMSMPC, vector_field=True,
@@ -883,33 +875,32 @@
           display_name = "Radius (code)")
 
 def _RadialVelocity(field, data):
-    center = data.get_field_parameter("center")
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = np.zeros(3)
-    new_field = ( (data['x']-center[0])*(data["x-velocity"]-bulk_velocity[0])
-                + (data['y']-center[1])*(data["y-velocity"]-bulk_velocity[1])
-                + (data['z']-center[2])*(data["z-velocity"]-bulk_velocity[2])
-                )/data["RadiusCode"]
-    if np.any(np.isnan(new_field)): # to fix center = point
-        new_field[np.isnan(new_field)] = 0.0
-    return new_field
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)    
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_r_component(velocities, theta, phi, normal)
+
 def _RadialVelocityABS(field, data):
     return np.abs(_RadialVelocity(field, data))
 def _ConvertRadialVelocityKMS(data):
-    return 1e-5
+    return km_per_cm
 add_field("RadialVelocity", function=_RadialVelocity,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          units=r"\rm{cm}/\rm{s}")
 add_field("RadialVelocityABS", function=_RadialVelocityABS,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          units=r"\rm{cm}/\rm{s}")
 add_field("RadialVelocityKMS", function=_RadialVelocity,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
 add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
+
+def _TangentialVelocity(field, data):
+    return np.sqrt(data["VelocityMagnitude"]**2.0
+                 - data["RadialVelocity"]**2.0)
+add_field("TangentialVelocity", 
+          function=_TangentialVelocity,
+          take_log=False, units=r"\rm{cm}/\rm{s}")
 
 def _CuttingPlaneVelocityX(field, data):
     x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
@@ -1026,6 +1017,47 @@
           display_name=r"\rm{Magnetic}\/\rm{Energy}",
           units="\rm{ergs}\/\rm{cm}^{-3}")
 
+def _BPoloidal(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_theta_component(Bfields, theta, phi, normal)
+
+add_field("BPoloidal", function=_BPoloidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
+def _BToroidal(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    phi   = data['sph_phi']
+
+    return get_sph_phi_component(Bfields, phi, normal)
+
+add_field("BToroidal", function=_BToroidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
+def _BRadial(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_r_component(Bfields, theta, phi, normal)
+
+add_field("BRadial", function=_BPoloidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
 def _VorticitySquared(field, data):
     mylog.debug("Generating vorticity on %s", data)
     # We need to set up stencils
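
The refactored cylindrical velocity fields lean on the normal field
parameter; a usage sketch, assuming a loaded pf and the standard yt disk
object:

    import numpy as np
    disk = pf.h.disk([0.5, 0.5, 0.5], [0.0, 0.0, 1.0], 0.2, 0.05)
    disk.set_field_parameter("normal", np.array([0.0, 0.0, 1.0]))
    v_r   = disk["cyl_RadialVelocity"]      # cm/s, radial in the disk plane
    v_phi = disk["cyl_TangentialVelocity"]  # cm/s, azimuthal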


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -26,10 +26,10 @@
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
-    assert_string_equal
+    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
 
-def assert_rel_equal(a1, a2, decimels):
-    return assert_almost_equal(a1/a2, 1.0, decimels)
+def assert_rel_equal(a1, a2, decimals):
+    return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 
@@ -139,11 +139,16 @@
         ndims = [ndims, ndims, ndims]
     else:
         assert(len(ndims) == 3)
-    if negative:
-        offset = 0.5
-    else:
-        offset = 0.0
+    if not iterable(negative):
+        negative = [negative for f in fields]
+    assert(len(fields) == len(negative))
+    offsets = []
+    for n in negative:
+        if n:
+            offsets.append(0.5)
+        else:
+            offsets.append(0.0)
     data = dict((field, (np.random.random(ndims) - offset) * peak_value)
-                 for field in fields)
+                 for field,offset in zip(fields,offsets))
     ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
     return ug


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -68,9 +68,12 @@
 def evaluate_domain_decomposition(n_d, pieces, ldom):
     """ Evaluate longest to shortest edge ratio
         BEWARE: lots of magic here """
-    ideal_bsize = 3.0 * (pieces * np.product(n_d) ** 2) ** (1.0 / 3.0)
-    bsize = int(np.sum(
-        ldom / np.array(n_d, dtype=np.float64) * np.product(n_d)))
+    eff_dim = (n_d > 1).sum()
+    ideal_bsize = eff_dim * (pieces * np.product(n_d) ** (eff_dim - 1)
+                             ) ** (1.0 / eff_dim)
+    mask = np.where(n_d > 1)
+    nd_arr = np.array(n_d, dtype=np.float64)[mask]
+    bsize = int(np.sum(ldom[mask] / nd_arr * np.product(nd_arr)))
     load_balance = float(np.product(n_d)) / \
         (float(pieces) * np.product((n_d - 1) / ldom + 1))
 
@@ -134,23 +137,15 @@
 
 
 def split_array(tab, psize):
-    """ Split array into px*py*pz subarrays using internal numpy routine. """
-    temp = [np.array_split(array, psize[1], axis=1)
-            for array in np.array_split(tab, psize[2], axis=2)]
-    temp = [item for sublist in temp for item in sublist]
-    temp = [np.array_split(array, psize[0], axis=0) for array in temp]
-    temp = [item for sublist in temp for item in sublist]
-    return temp
-
-
-if __name__ == "__main__":
-
-    NPROC = 12
-    ARRAY = np.zeros((128, 128, 129))
-    BBOX = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
-
-    PROCS = get_psize(np.array(ARRAY.shape), NPROC)
-    LE, RE, DATA = decompose_array(ARRAY, PROCS, BBOX)
-
-    for idx in range(NPROC):
-        print LE[idx, :], RE[idx, :], DATA[idx].shape
+    """ Split array into px*py*pz subarrays. """
+    n_d = np.array(tab.shape, dtype=np.int64)
+    slices = []
+    for i in range(psize[0]):
+        for j in range(psize[1]):
+            for k in range(psize[2]):
+                piece = np.array((i, j, k), dtype=np.int64)
+                lei = n_d * piece / psize
+                rei = n_d * (piece + np.ones(3, dtype=np.int64)) / psize
+                slices.append(np.s_[lei[0]:rei[0], lei[1]:
+                                    rei[1], lei[2]:rei[2]])
+    return [tab[slc] for slc in slices]
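
A quick sanity sketch for the rewritten split_array; psize gives the
number of pieces along each axis:

    import numpy as np
    from yt.utilities.decompose import split_array
    tab = np.arange(4 * 6 * 8).reshape(4, 6, 8)
    pieces = split_array(tab, np.array([2, 2, 2], dtype=np.int64))
    assert len(pieces) == 8
    assert sum(p.size for p in pieces) == tab.size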


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -147,6 +147,14 @@
         return "You must create an API key before uploading.  See " + \
                "https://data.yt-project.org/getting_started.html"
 
+class YTNoFilenamesMatchPattern(YTException):
+    def __init__(self, pattern):
+        self.pattern = pattern
+
+    def __str__(self):
+        return "No filenames were found to match the pattern: " + \
+               "'%s'" % (self.pattern)
+
 class YTNoOldAnswer(YTException):
     def __init__(self, path):
         self.path = path


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/kdtree/__init__.py
--- a/yt/utilities/kdtree/__init__.py
+++ b/yt/utilities/kdtree/__init__.py
@@ -1,1 +0,0 @@
-from fKDpy import *


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/kdtree/api.py
--- /dev/null
+++ b/yt/utilities/kdtree/api.py
@@ -0,0 +1,9 @@
+from fKDpy import \
+    chainHOP_tags_dens, \
+    create_tree, \
+    fKD, \
+    find_nn_nearest_neighbors, \
+    free_tree, \
+    find_chunk_nearest_neighbors
+
+


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -338,3 +338,47 @@
                     rg[2,i,j,k] = zg[i,j,k] - c[2]
         return rg
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def obtain_rv_vec(data):
+    # This is just to let the pointers exist and whatnot.  We can't cdef them
+    # inside conditionals.
+    cdef np.ndarray[np.float64_t, ndim=1] vxf
+    cdef np.ndarray[np.float64_t, ndim=1] vyf
+    cdef np.ndarray[np.float64_t, ndim=1] vzf
+    cdef np.ndarray[np.float64_t, ndim=2] rvf
+    cdef np.ndarray[np.float64_t, ndim=3] vxg
+    cdef np.ndarray[np.float64_t, ndim=3] vyg
+    cdef np.ndarray[np.float64_t, ndim=3] vzg
+    cdef np.ndarray[np.float64_t, ndim=4] rvg
+    cdef np.float64_t bv[3]
+    cdef int i, j, k
+    bulk_velocity = data.get_field_parameter("bulk_velocity")
+    if bulk_velocity is None:
+        bulk_velocity = np.zeros(3)
+    bv[0] = bulk_velocity[0]; bv[1] = bulk_velocity[1]; bv[2] = bulk_velocity[2]
+    if len(data['x-velocity'].shape) == 1:
+        # One dimensional data
+        vxf = data['x-velocity'].astype("float64")
+        vyf = data['y-velocity'].astype("float64")
+        vzf = data['z-velocity'].astype("float64")
+        rvf = np.empty((3, vxf.shape[0]), 'float64')
+        for i in range(vxf.shape[0]):
+            rvf[0, i] = vxf[i] - bv[0]
+            rvf[1, i] = vyf[i] - bv[1]
+            rvf[2, i] = vzf[i] - bv[2]
+        return rvf
+    else:
+        # Three dimensional data
+        vxg = data['x-velocity'].astype("float64")
+        vyg = data['y-velocity'].astype("float64")
+        vzg = data['z-velocity'].astype("float64")
+        rvg = np.empty((3, vxg.shape[0], vxg.shape[1], vxg.shape[2]), 'float64')
+        for i in range(vxg.shape[0]):
+            for j in range(vxg.shape[1]):
+                for k in range(vxg.shape[2]):
+                    rvg[0,i,j,k] = vxg[i,j,k] - bv[0]
+                    rvg[1,i,j,k] = vyg[i,j,k] - bv[1]
+                    rvg[2,i,j,k] = vzg[i,j,k] - bv[2]
+        return rvg
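
A behavior sketch for obtain_rv_vec, assuming dd is any yt data object;
the result is component-first, matching the math_utils helpers:

    import numpy as np
    from yt.utilities.lib import obtain_rv_vec
    dd.set_field_parameter("bulk_velocity", np.array([1.0e5, 0.0, 0.0]))
    rv = obtain_rv_vec(dd)  # shape (3, N): velocities minus bulk_velocity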


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -233,49 +233,6 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def obtain_rvec(data):
-    # This is just to let the pointers exist and whatnot.  We can't cdef them
-    # inside conditionals.
-    cdef np.ndarray[np.float64_t, ndim=1] xf
-    cdef np.ndarray[np.float64_t, ndim=1] yf
-    cdef np.ndarray[np.float64_t, ndim=1] zf
-    cdef np.ndarray[np.float64_t, ndim=2] rf
-    cdef np.ndarray[np.float64_t, ndim=3] xg
-    cdef np.ndarray[np.float64_t, ndim=3] yg
-    cdef np.ndarray[np.float64_t, ndim=3] zg
-    cdef np.ndarray[np.float64_t, ndim=4] rg
-    cdef np.float64_t c[3]
-    cdef int i, j, k
-    center = data.get_field_parameter("center")
-    c[0] = center[0]; c[1] = center[1]; c[2] = center[2]
-    if len(data['x'].shape) == 1:
-        # One dimensional data
-        xf = data['x']
-        yf = data['y']
-        zf = data['z']
-        rf = np.empty((3, xf.shape[0]), 'float64')
-        for i in range(xf.shape[0]):
-            rf[0, i] = xf[i] - c[0]
-            rf[1, i] = yf[i] - c[1]
-            rf[2, i] = zf[i] - c[2]
-        return rf
-    else:
-        # Three dimensional data
-        xg = data['x']
-        yg = data['y']
-        zg = data['z']
-        rg = np.empty((3, xg.shape[0], xg.shape[1], xg.shape[2]), 'float64')
-        for i in range(xg.shape[0]):
-            for j in range(xg.shape[1]):
-                for k in range(xg.shape[2]):
-                    rg[0,i,j,k] = xg[i,j,k] - c[0]
-                    rg[1,i,j,k] = yg[i,j,k] - c[1]
-                    rg[2,i,j,k] = zg[i,j,k] - c[2]
-        return rg
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
 def kdtree_get_choices(np.ndarray[np.float64_t, ndim=3] data,
                        np.ndarray[np.float64_t, ndim=1] l_corner,
                        np.ndarray[np.float64_t, ndim=1] r_corner):


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/lib/tests/test_geometry_utils.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_geometry_utils.py
@@ -0,0 +1,30 @@
+from yt.testing import *
+from yt.utilities.lib import obtain_rvec, obtain_rv_vec
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_obtain_rvec():
+    pf = fake_random_pf(64, nprocs=8, fields=_fields, 
+           negative = [False, True, True, True])
+    
+    dd = pf.h.sphere((0.5,0.5,0.5), 0.2)
+
+    coords = obtain_rvec(dd)
+
+    r = np.sqrt(np.sum(coords*coords,axis=0))
+
+    assert_array_less(r.max(), 0.2)
+
+    assert_array_less(0.0, r.min())
+
+def test_obtain_rv_vec():
+    pf = fake_random_pf(64, nprocs=8, fields=_fields, 
+           negative = [False, True, True, True])
+
+    dd = pf.h.all_data()
+
+    vels = obtain_rv_vec(dd)
+
+    assert_array_equal(vels[0,:], dd['x-velocity'])
+    assert_array_equal(vels[1,:], dd['y-velocity'])
+    assert_array_equal(vels[2,:], dd['z-velocity'])


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -674,3 +674,191 @@
                   [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
     
     return R
+
+def get_ortho_basis(normal):
+    xprime = np.cross([0.0,1.0,0.0],normal)
+    if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
+    yprime = np.cross(normal,xprime)
+    zprime = normal
+    return (xprime, yprime, zprime)
+
+def get_sph_r(coords):
+    # The spherical coordinates radius is simply the magnitude of the
+    # coordinate vector.
+
+    return np.sqrt(np.sum(coords**2, axis=0))
+
+def resize_vector(vector,vector_array):
+    if len(vector_array.shape) == 4:
+        res_vector = np.resize(vector,(3,1,1,1))
+    else:
+        res_vector = np.resize(vector,(3,1))
+    return res_vector
+
+def get_sph_theta(coords, normal):
+    # The angle (theta) with respect to the normal (J), is the arccos
+    # of the dot product of the normal with the normalized coordinate
+    # vector.
+    
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    
+    J = np.tile(res_normal,tile_shape)
+
+    JdotCoords = np.sum(J*coords,axis=0)
+    
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=0)) )
+
+def get_sph_phi(coords, normal):
+    # We have freedom with respect to what axis (xprime) to define
+    # the disk angle. Here I've chosen to use the axis that is
+    # perpendicular to the normal and the y-axis. When normal ==
+    # y-hat, then set xprime = z-hat. With this definition, when
+    # normal == z-hat (as is typical), then xprime == x-hat.
+    #
+    # The angle is then given by the arctan of the ratio of the
+    # yprime-component and the xprime-component of the coordinate 
+    # vector.
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, coords)
+    res_yprime = resize_vector(yprime, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    Px = np.sum(Jx*coords,axis=0)
+    Py = np.sum(Jy*coords,axis=0)
+    
+    return np.arctan2(Py,Px)
+
+def get_cyl_r(coords, normal):
+    # The cross product of the normal (J) with a coordinate vector
+    # gives a vector of magnitude equal to the cylindrical radius.
+
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
+    
+    JcrossCoords = np.cross(J, coords, axisa=0, axisb=0, axisc=0)
+    return np.sqrt(np.sum(JcrossCoords**2, axis=0))
+
+def get_cyl_z(coords, normal):
+    # The dot product of the normal (J) with the coordinate vector 
+    # gives the cylindrical height.
+
+    res_normal = resize_vector(normal, coords)
+    
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
+
+    return np.sum(J*coords, axis=0)  
+
+def get_cyl_theta(coords, normal):
+    # This is identical to the spherical phi component
+
+    return get_sph_phi(coords, normal)
+
+
+def get_cyl_r_component(vectors, theta, normal):
+    # The r of a vector is the vector dotted with rhat
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    rhat = Jx*np.cos(theta) + Jy*np.sin(theta)
+
+    return np.sum(vectors*rhat,axis=0)
+
+def get_cyl_theta_component(vectors, theta, normal):
+    # The theta component of a vector is the vector dotted with thetahat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    thetahat = -Jx*np.sin(theta) + Jy*np.cos(theta)
+
+    return np.sum(vectors*thetahat, axis=0)
+
+def get_cyl_z_component(vectors, normal):
+    # The z component of a vector is the vector dotted with zhat
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    zhat = np.tile(res_zprime, tile_shape)
+
+    return np.sum(vectors*zhat, axis=0)
+
+def get_sph_r_component(vectors, theta, phi, normal):
+    # The r component of a vector is the vector dotted with rhat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
+
+    rhat = Jx*np.sin(theta)*np.cos(phi) + \
+           Jy*np.sin(theta)*np.sin(phi) + \
+           Jz*np.cos(theta)
+
+    return np.sum(vectors*rhat, axis=0)
+
+def get_sph_phi_component(vectors, phi, normal):
+    # The phi component of a vector is the vector dotted with phihat
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    phihat = -Jx*np.sin(phi) + Jy*np.cos(phi)
+
+    return np.sum(vectors*phihat, axis=0)
+
+def get_sph_theta_component(vectors, theta, phi, normal):
+    # The theta component of a vector is the vector dotted with thetahat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
+    
+    thetahat = Jx*np.cos(theta)*np.cos(phi) + \
+               Jy*np.cos(theta)*np.sin(phi) - \
+               Jz*np.sin(theta)
+
+    return np.sum(vectors*thetahat, axis=0)
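
Taken together, these helpers convert (3, N) arrays of Cartesian points and
vectors into spherical and cylindrical coordinates about an arbitrary normal.
A minimal sketch of the intended usage (a sanity check, not part of the
changeset), using the yt.utilities.math_utils import path that the tests
below rely on:

    import numpy as np
    from yt.utilities.math_utils import get_sph_r, get_sph_theta, get_sph_phi

    # Points are stored one per column, as a (3, N) array.
    coords = np.array([[1.0, 0.0],
                       [0.0, 1.0],
                       [0.0, 1.0]])
    normal = [0, 0, 1]  # per the comment in get_sph_phi, xprime == x-hat here

    r = get_sph_r(coords)                  # magnitude of each column
    theta = get_sph_theta(coords, normal)  # angle from the normal
    phi = get_sph_phi(coords, normal)      # disk angle about the normal

    # The point (1, 0, 0) lies in the disk plane along xprime:
    # r = 1, theta = pi/2, phi = 0.
    assert np.allclose([r[0], theta[0], phi[0]], [1.0, np.pi / 2, 0.0])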


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/tests/test_coordinate_conversions.py
--- /dev/null
+++ b/yt/utilities/tests/test_coordinate_conversions.py
@@ -0,0 +1,125 @@
+from yt.testing import *
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi
+
+# Randomly generated coordinates in the domain [[-1,1],[-1,1],[-1,1]]
+coords = np.array([[-0.41503037, -0.22102472, -0.55774212],
+                   [ 0.73828247, -0.17913899,  0.64076921],
+                   [ 0.08922066, -0.94254844, -0.61774511],
+                   [ 0.10173242, -0.95789145,  0.16294352],
+                   [ 0.73186508, -0.3109153 ,  0.75728738],
+                   [ 0.8757989 , -0.41475119, -0.57039201],
+                   [ 0.58040762,  0.81969082,  0.46759728],
+                   [-0.89983356, -0.9853683 , -0.38355343]]).T
+
+def test_spherical_coordinate_conversion():
+    normal = [0, 0, 1]
+    real_r =     [ 0.72950559,  0.99384957,  1.13047198,  0.97696269,  
+                   1.09807968,  1.12445067,  1.10788685,  1.38843954]
+    real_theta = [ 2.44113629,  0.87012028,  2.14891444,  1.4032274 ,  
+                   0.80979483,  2.10280198,  1.13507735,  1.85068416]
+    real_phi =   [-2.65224483, -0.23804243, -1.47641858, -1.46498842, 
+                  -0.40172325, -0.4422801 ,  0.95466734, -2.31085392]
+
+    calc_r = get_sph_r(coords)
+    calc_theta = get_sph_theta(coords, normal)
+    calc_phi = get_sph_phi(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_phi, real_phi)
+
+    normal = [1, 0, 0]
+    real_theta = [ 2.17598842,  0.73347681,  1.49179079,  1.46647589,  
+                   0.8412984 ,  0.67793705,  1.0193883 ,  2.27586987]
+    real_phi =   [-0.37729951, -2.86898397, -0.99063518, -1.73928995, 
+                   -2.75201227,-0.62870527,  2.08920872, -1.19959244]
+
+    calc_theta = get_sph_theta(coords, normal)
+    calc_phi = get_sph_phi(coords, normal)
+    
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_phi, real_phi)
+
+def test_cylindrical_coordinate_conversion():
+    normal = [0, 0, 1]
+    real_r =     [ 0.47021498,  0.75970506,  0.94676179,  0.96327853,  
+                   0.79516968,  0.96904193,  1.00437346,  1.3344104 ]    
+    real_theta = [-2.65224483, -0.23804243, -1.47641858, -1.46498842, 
+                  -0.40172325, -0.4422801 ,  0.95466734, -2.31085392]
+    real_z =     [-0.55774212,  0.64076921, -0.61774511,  0.16294352,
+                   0.75728738, -0.57039201,  0.46759728, -0.38355343]
+
+    calc_r = get_cyl_r(coords, normal)
+    calc_theta = get_cyl_theta(coords, normal)
+    calc_z = get_cyl_z(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_z, real_z)
+
+    normal = [1, 0, 0]
+    real_r =     [ 0.59994016,  0.66533898,  1.12694569,  0.97165149,
+                   0.81862843,  0.70524152,  0.94368441,  1.05738542]
+    real_theta = [-0.37729951, -2.86898397, -0.99063518, -1.73928995, 
+                  -2.75201227, -0.62870527,  2.08920872, -1.19959244]
+    real_z =     [-0.41503037,  0.73828247,  0.08922066,  0.10173242,
+                   0.73186508,  0.8757989 ,  0.58040762, -0.89983356]
+
+    calc_r = get_cyl_r(coords, normal)
+    calc_theta = get_cyl_theta(coords, normal)
+    calc_z = get_cyl_z(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_z, real_z)
+
+def test_spherical_coordinate_projections():
+    normal = [0, 0, 1]
+    theta = get_sph_theta(coords, normal)
+    phi = get_sph_phi(coords, normal)
+    zero = np.tile(0,coords.shape[1])
+
+    # Purely radial field
+    vecs = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+
+    # Purely toroidal field
+    vecs = np.array([-np.sin(phi), np.cos(phi), zero])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
+
+    # Purely poloidal field
+    vecs = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -np.sin(theta)])
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
+
+def test_cylindrical_coordinate_projections():
+    normal = [0, 0, 1]
+    theta = get_cyl_theta(coords, normal)
+    z = get_cyl_z(coords, normal)
+    zero = np.tile(0, coords.shape[1])
+
+    # Purely radial field
+    vecs = np.array([np.cos(theta), np.sin(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
+
+    # Purely toroidal field
+    vecs = np.array([-np.sin(theta), np.cos(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))
+
+    # Purely z field
+    vecs = np.array([zero, zero, z])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))
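
The projection tests above work because rhat, thetahat, and phihat form an
orthonormal basis at each point. One consequence, sketched here under the
assumption (which those tests already rely on) that get_ortho_basis returns
an orthonormal triple: the three spherical components reconstruct each
vector's Cartesian magnitude.

    import numpy as np
    from yt.utilities.math_utils import \
        get_sph_theta, get_sph_phi, get_sph_r_component, \
        get_sph_theta_component, get_sph_phi_component

    normal = [0, 0, 1]
    np.random.seed(0)
    coords = np.random.uniform(-1.0, 1.0, (3, 100))   # sample points
    vectors = np.random.uniform(-1.0, 1.0, (3, 100))  # a vector at each point

    theta = get_sph_theta(coords, normal)
    phi = get_sph_phi(coords, normal)

    vr = get_sph_r_component(vectors, theta, phi, normal)
    vt = get_sph_theta_component(vectors, theta, phi, normal)
    vp = get_sph_phi_component(vectors, phi, normal)

    # An orthonormal decomposition preserves the norm of each vector.
    np.testing.assert_array_almost_equal(vr**2 + vt**2 + vp**2,
                                         np.sum(vectors**2, axis=0))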


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/tests/test_decompose.py
--- /dev/null
+++ b/yt/utilities/tests/test_decompose.py
@@ -0,0 +1,96 @@
+"""
+Test suite for cartesian domain decomposition.
+
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import assert_array_equal, assert_almost_equal
+import numpy as np
+import yt.utilities.decompose as dec
+
+
+def setup():
+    pass
+
+
+def test_psize_2d():
+    procs = dec.get_psize(np.array([5, 1, 7]), 6)
+    assert_array_equal(procs, np.array([3, 1, 2]))
+    procs = dec.get_psize(np.array([1, 7, 5]), 6)
+    assert_array_equal(procs, np.array([1, 2, 3]))
+    procs = dec.get_psize(np.array([7, 5, 1]), 6)
+    assert_array_equal(procs, np.array([2, 3, 1]))
+
+
+def test_psize_3d():
+    procs = dec.get_psize(np.array([33, 35, 37]), 12)
+    assert_array_equal(procs, np.array([3, 2, 2]))
+
+
+def test_decomposition_2d():
+    array = np.ones((7, 5, 1))
+    bbox = np.array([[-0.7, 0.0], [1.5, 2.0], [0.0, 0.7]])
+    ledge, redge, data = dec.decompose_array(array, np.array([2, 3, 1]), bbox)
+
+    assert_array_equal(data[1].shape, np.array([3, 2, 1]))
+
+    gold_le = np.array([
+                       [-0.7, 1.5, 0.0], [-0.7, 1.6, 0.0],
+                       [-0.7, 1.8, 0.0], [-0.4, 1.5, 0.0],
+                       [-0.4, 1.6, 0.0], [-0.4, 1.8, 0.0]
+                       ])
+    assert_almost_equal(ledge, gold_le, 8)
+
+    gold_re = np.array(
+        [[-0.4, 1.6, 0.7], [-0.4, 1.8, 0.7],
+         [-0.4, 2.0, 0.7], [0.0, 1.6, 0.7],
+         [0.0, 1.8, 0.7], [0.0, 2.0, 0.7]]
+    )
+    assert_almost_equal(redge, gold_re, 8)
+
+
+def test_decomposition_3d():
+    array = np.ones((33, 35, 37))
+    bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+
+    ledge, redge, data = dec.decompose_array(array, np.array([3, 2, 2]), bbox)
+    assert_array_equal(data[0].shape, np.array([11, 17, 18]))
+
+    gold_le = np.array(
+        [[0.00000, -1.50000, 1.00000], [0.00000, -1.50000, 1.72973],
+         [0.00000, -0.04286, 1.00000], [0.00000, -0.04286, 1.72973],
+         [0.33333, -1.50000, 1.00000], [0.33333, -1.50000, 1.72973],
+         [0.33333, -0.04286, 1.00000], [0.33333, -0.04286, 1.72973],
+         [0.66667, -1.50000, 1.00000], [0.66667, -1.50000, 1.72973],
+         [0.66667, -0.04286, 1.00000], [0.66667, -0.04286, 1.72973]]
+    )
+    assert_almost_equal(ledge, gold_le, 5)
+
+    gold_re = np.array(
+        [[0.33333, -0.04286, 1.72973], [0.33333, -0.04286, 2.50000],
+         [0.33333, 1.50000, 1.72973], [0.33333, 1.50000, 2.50000],
+         [0.66667, -0.04286, 1.72973], [0.66667, -0.04286, 2.50000],
+         [0.66667, 1.50000, 1.72973], [0.66667, 1.50000, 2.50000],
+         [1.00000, -0.04286, 1.72973], [1.00000, -0.04286, 2.50000],
+         [1.00000, 1.50000, 1.72973], [1.00000, 1.50000, 2.50000]]
+    )
+    assert_almost_equal(redge, gold_re, 5)
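
These tests exercise the two halves of the decomposition API: get_psize
chooses a processor layout for a given grid shape, and decompose_array splits
the data and bounding box into that many patches. A minimal usage sketch
following the calls above (the invariants asserted here are implied by the
expected shapes in the tests, not taken from the changeset):

    import numpy as np
    import yt.utilities.decompose as dec

    # A 33x35x37 grid split across 12 processors; per test_psize_3d the
    # chosen layout is [3, 2, 2].
    shape = np.array([33, 35, 37])
    psize = dec.get_psize(shape, 12)

    array = np.ones(shape)
    bbox = np.array([[0.0, 1.0], [-1.5, 1.5], [1.0, 2.5]])
    ledge, redge, data = dec.decompose_array(array, psize, bbox)

    # One patch per processor, and the patches partition the cells.
    assert len(data) == 12
    assert sum(d.size for d in data) == array.size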


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/utilities/tests/test_kdtrees.py
--- a/yt/utilities/tests/test_kdtrees.py
+++ b/yt/utilities/tests/test_kdtrees.py
@@ -26,10 +26,7 @@
 from yt.testing import *
 
 try:
-    from yt.utilities.kdtree import \
-        chainHOP_tags_dens, \
-        create_tree, fKD, find_nn_nearest_neighbors, \
-        free_tree, find_chunk_nearest_neighbors
+    from yt.utilities.kdtree.api import *
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -364,39 +364,30 @@
 
 class StreamlineCallback(PlotCallback):
     _type_name = "streamlines"
-    def __init__(self, field_x, field_y, factor=6.0, nx=16, ny=16,
-                 xstart=(0,1), ystart=(0,1), nsample=256,
-                 start_at_xedge=False, start_at_yedge=False,
-                 plot_args=None):
+    def __init__(self, field_x, field_y, factor = 16,
+                 density = 1, arrowsize = 1, arrowstyle = None,
+                 color = None, normalize = False):
         """
-        annotate_streamlines(field_x, field_y, factor=6.0, nx=16, ny=16,
-                             xstart=(0,1), ystart=(0,1), nsample=256,
-                             start_at_xedge=False, start_at_yedge=False,
-                             plot_args=None):
+        annotate_streamlines(field_x, field_y, factor = 16, density = 1,
+                             arrowsize = 1, arrowstyle = None,
+                             color = None, normalize = False):
 
         Add streamlines to any plot, using the *field_x* and *field_y*
-        from the associated data, using *nx* and *ny* starting points
-        that are bounded by *xstart* and *ystart*.  To begin
-        streamlines from the left edge of the plot, set
-        *start_at_xedge* to True; for the bottom edge, use
-        *start_at_yedge*.  A line with the qmean vector magnitude will
-        cover 1.0/*factor* of the image.
+        from the associated data, downsampling the pixelized fields by
+        *factor* as in 'quiver'. *density* controls how closely the
+        streamlines are packed, as in matplotlib's streamplot.
         """
         PlotCallback.__init__(self)
         self.field_x = field_x
         self.field_y = field_y
-        self.xstart = xstart
-        self.ystart = ystart
-        self.nsample = nsample
+        self.bv_x = self.bv_y = 0
         self.factor = factor
-        if start_at_xedge:
-            self.data_size = (1,ny)
-        elif start_at_yedge:
-            self.data_size = (nx,1)
-        else:
-            self.data_size = (nx,ny)
-        if plot_args is None: plot_args = {'color':'k', 'linestyle':'-'}
-        self.plot_args = plot_args
+        self.dens = density
+        self.arrowsize = arrowsize
+        if arrowstyle is None: arrowstyle = '-|>'
+        self.arrowstyle = arrowstyle
+        if color is None: color = "#000000"
+        self.color = color
+        self.normalize = normalize
         
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -404,43 +395,31 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0]
-        ny = plot.image._A.shape[1]
+        nx = plot.image._A.shape[0] / self.factor
+        ny = plot.image._A.shape[1] / self.factor
         pixX = _MPL.Pixelize(plot.data['px'],
                              plot.data['py'],
                              plot.data['pdx'],
                              plot.data['pdy'],
-                             plot.data[self.field_x],
+                             plot.data[self.field_x] - self.bv_x,
                              int(nx), int(ny),
-                           (x0, x1, y0, y1),)
+                           (x0, x1, y0, y1),).transpose()
         pixY = _MPL.Pixelize(plot.data['px'],
                              plot.data['py'],
                              plot.data['pdx'],
                              plot.data['pdy'],
-                             plot.data[self.field_y],
+                             plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
-                           (x0, x1, y0, y1),)
-        r0 = np.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
-                      self.ystart[0]*ny:self.ystart[1]*ny:self.data_size[1]*1j]
-        lines = np.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
-        lines[0,:,:,:] = r0
-        mag = np.sqrt(pixX**2 + pixY**2)
-        scale = np.sqrt(nx*ny) / (self.factor * mag.mean())
-        dt = 1.0 / (self.nsample-1)
-        for i in range(1,self.nsample):
-            xt = lines[i-1,0,:,:]
-            yt = lines[i-1,1,:,:]
-            ix = np.maximum(np.minimum((xt).astype('int'), nx-1), 0)
-            iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
-            lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
-            lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
-        # scale into data units
-        lines[:,0,:,:] = lines[:,0,:,:] * (xx1 - xx0) / nx + xx0
-        lines[:,1,:,:] = lines[:,1,:,:] * (yy1 - yy0) / ny + yy0
-        for i in range(self.data_size[0]):
-            for j in range(self.data_size[1]):
-                plot._axes.plot(lines[:,0,i,j], lines[:,1,i,j],
-                                **self.plot_args)
+                           (x0, x1, y0, y1),).transpose()
+        X, Y = (np.linspace(xx0, xx1, nx, endpoint=True),
+                np.linspace(yy0, yy1, ny, endpoint=True))
+        if self.normalize:
+            nn = np.sqrt(pixX**2 + pixY**2)
+            pixX /= nn
+            pixY /= nn
+        plot._axes.streamplot(X, Y, pixX, pixY, density=self.dens,
+                              arrowsize=self.arrowsize, arrowstyle=self.arrowstyle,
+                              color=self.color, norm=self.normalize)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
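
The rewritten callback hands streamline integration off to matplotlib's
streamplot rather than tracing lines by hand. A standalone sketch of the same
call pattern, substituting a synthetic rotational field for the pixelized yt
data (matplotlib >= 1.2 is assumed, which introduced streamplot):

    import numpy as np
    import matplotlib.pyplot as plt

    # Stand-ins for pixX/pixY; in the callback these come from
    # _MPL.Pixelize and are transposed to streamplot's (ny, nx) layout.
    nx = ny = 32
    X = np.linspace(-1.0, 1.0, nx, endpoint=True)
    Y = np.linspace(-1.0, 1.0, ny, endpoint=True)
    XX, YY = np.meshgrid(X, Y)
    pixX, pixY = -YY, XX  # solid-body rotation about the origin

    fig, ax = plt.subplots()
    ax.streamplot(X, Y, pixX, pixY, density=1, arrowsize=1,
                  arrowstyle='-|>', color="#000000")
    fig.savefig("streamlines_sketch.png")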


diff -r 10ba9aa8f38870887afb363dc5ffce8dba53ce1d -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -146,7 +146,8 @@
     @parallel_passthrough
     def _finalize_parallel(self,data):
         self.streamlines = self.comm.mpi_allreduce(self.streamlines, op='sum')
-        self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
+        if self.get_magnitude:
+            self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
         
     def _integrate_through_brick(self, node, stream, step,
                                  periodic=False, mag=None):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/201708b64058/
changeset:   201708b64058
branch:      yt
user:        MatthewTurk
date:        2012-10-23 20:18:56
summary:     Fixing the typo (thanks, Nathan!), and adding a big_data option to
@requires_pf.
affected #:  3 files

diff -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 -r 201708b64058a3cbfe9c458c27655e67f6c399e2 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -8,4 +8,4 @@
 exclude=answer_testing
 with-xunit=1
 with-answer-testing=1
-#answer-compare=gold001
+answer-compare=gold001


diff -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 -r 201708b64058a3cbfe9c458c27655e67f6c399e2 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -43,7 +43,7 @@
         yield test
 
 g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
-@requires_pf(g30)
+@requires_pf(g30, big_data=True)
 def test_galaxy0030():
     pf = data_dir_load(g30)
     yield assert_equal, str(pf), "galaxy0030"


diff -r 65afdb7dbe7d4dee0ef423cb2e80a45103067073 -r 201708b64058a3cbfe9c458c27655e67f6c399e2 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -74,6 +74,9 @@
             my_hash = "UNKNOWN%s" % (time.time())
         parser.add_option("--answer-compare", dest="compare_name",
             default=None, help="The name against which we will compare")
+        parser.add_option("--answer-big-data", dest="big_data",
+            default=False, help="Should we run against big data, too?",
+            action="store_true")
         parser.add_option("--answer-name", dest="this_name",
             default=my_hash,
             help="The name we'll call this set of tests")
@@ -97,6 +100,8 @@
                 AnswerTestOpener(options.compare_name)
         self.answer_name = options.this_name
         self.store_results = options.store_results
+        global run_big_data
+        run_big_data = options.big_data
 
     def finalize(self, result):
         # This is where we dump our result storage up to Amazon, if we are able
@@ -148,7 +153,7 @@
         if self.reference_storage is not None:
             dd = self.reference_storage.get(str(self.pf))
             if dd is None: raise YTNoOldAnswer()
-            ov = dd[self.descrption]
+            ov = dd[self.description]
             self.compare(nv, ov)
         else:
             ov = None
@@ -345,12 +350,15 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newc == oldc)
 
-def requires_pf(pf_fn):
+def requires_pf(pf_fn, big_data = False):
     def ffalse(func):
         return lambda: None
     def ftrue(func):
         return func
-    if not can_run_pf(pf_fn):
+    global run_big_data
+    if run_big_data == False and big_data == True:
+        return ffalse
+    elif not can_run_pf(pf_fn):
         return ffalse
     else:
         return ftrue
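
With this change a test decorated with big_data=True collapses to a no-op
lambda unless nosetests is invoked with --answer-big-data. A sketch of the
resulting usage, following the galaxy0030 test above:

    from yt.utilities.answer_testing.framework import \
        requires_pf, data_dir_load
    from yt.testing import assert_equal

    g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"

    # Skipped entirely (not failed) on runs without --answer-big-data.
    @requires_pf(g30, big_data=True)
    def test_galaxy0030_name():
        pf = data_dir_load(g30)
        yield assert_equal, str(pf), "galaxy0030"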



https://bitbucket.org/yt_analysis/yt-3.0/changeset/58b6bb1c8283/
changeset:   58b6bb1c8283
branch:      yt
user:        MatthewTurk
date:        2012-10-23 20:47:23
summary:     Splitting the answer testing into a separate plugin file, to allow it to be
imported directly in setup.py.
affected #:  3 files

diff -r 201708b64058a3cbfe9c458c27655e67f6c399e2 -r 58b6bb1c8283318125e9691a93d6b1ac6b715e2d setup.py
--- a/setup.py
+++ b/setup.py
@@ -170,4 +170,6 @@
     return
 
 if __name__ == '__main__':
+    if "nosetests" in sys.argv: 
+        from yt.utilities.answer_testing.plugin import AnswerTesting
     setup_package()


diff -r 201708b64058a3cbfe9c458c27655e67f6c399e2 -r 58b6bb1c8283318125e9691a93d6b1ac6b715e2d yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -29,12 +29,9 @@
 import contextlib
 import urllib2
 
+from .plugin import AnswerTesting, run_big_data
 from yt.testing import *
-from yt.utilities.command_line import get_yt_version
 from yt.config import ytcfg
-from yt.utilities.logger import \
-    disable_stream_logging
-from nose.plugins import Plugin
 from yt.mods import *
 import cPickle
 
@@ -63,63 +60,6 @@
         self.cache[pf_name] = rv
         return rv
 
-class AnswerTesting(Plugin):
-    name = "answer-testing"
-
-    def options(self, parser, env=os.environ):
-        super(AnswerTesting, self).options(parser, env=env)
-        try:
-            my_hash = get_yt_version()
-        except:
-            my_hash = "UNKNOWN%s" % (time.time())
-        parser.add_option("--answer-compare", dest="compare_name",
-            default=None, help="The name against which we will compare")
-        parser.add_option("--answer-big-data", dest="big_data",
-            default=False, help="Should we run against big data, too?",
-            action="store_true")
-        parser.add_option("--answer-name", dest="this_name",
-            default=my_hash,
-            help="The name we'll call this set of tests")
-        parser.add_option("--answer-store", dest="store_results",
-            default=False, action="store_true")
-
-    def configure(self, options, conf):
-        super(AnswerTesting, self).configure(options, conf)
-        if not self.enabled:
-            return
-        disable_stream_logging()
-        from yt.config import ytcfg
-        ytcfg["yt","__withintesting"] = "True"
-        AnswerTestingTest.result_storage = \
-            self.result_storage = defaultdict(dict)
-        if options.compare_name is not None:
-            # Now we grab from our S3 store
-            if options.compare_name == "latest":
-                options.compare_name = _latest
-            AnswerTestingTest.reference_storage = \
-                AnswerTestOpener(options.compare_name)
-        self.answer_name = options.this_name
-        self.store_results = options.store_results
-        global run_big_data
-        run_big_data = options.big_data
-
-    def finalize(self, result):
-        # This is where we dump our result storage up to Amazon, if we are able
-        # to.
-        if self.store_results is False: return
-        import boto
-        from boto.s3.key import Key
-        c = boto.connect_s3()
-        bucket = c.get_bucket("yt-answer-tests")
-        for pf_name in self.result_storage:
-            rs = cPickle.dumps(self.result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
-            if tk is not None: tk.delete()
-            k = Key(bucket)
-            k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
-            k.set_acl("public-read")
-
 @contextlib.contextmanager
 def temp_cwd(cwd):
     oldcwd = os.getcwd()
@@ -355,7 +295,6 @@
         return lambda: None
     def ftrue(func):
         return func
-    global run_big_data
     if run_big_data == False and big_data == True:
         return ffalse
     elif not can_run_pf(pf_fn):


diff -r 201708b64058a3cbfe9c458c27655e67f6c399e2 -r 58b6bb1c8283318125e9691a93d6b1ac6b715e2d yt/utilities/answer_testing/plugin.py
--- /dev/null
+++ b/yt/utilities/answer_testing/plugin.py
@@ -0,0 +1,97 @@
+"""
+Answer Testing using Nose as a starting point
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import logging
+import os
+import time
+import hashlib
+import contextlib
+import urllib2
+import cPickle
+from collections import defaultdict
+from nose.plugins import Plugin
+
+run_big_data = False
+
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=None, help="The name against which we will compare")
+        parser.add_option("--answer-big-data", dest="big_data",
+            default=False, help="Should we run against big data, too?",
+            action="store_true")
+        parser.add_option("--answer-name", dest="this_name",
+            default=None,
+            help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+        from yt.utilities.logger import disable_stream_logging
+        disable_stream_logging()
+        from yt.utilities.command_line import get_yt_version
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        if options.this_name is None: options.this_name = my_hash
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
+        from .framework import AnswerTestingTest, AnswerTestOpener
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
+        if options.compare_name is not None:
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
+        global run_big_data
+        run_big_data = options.big_data
+
+    def finalize(self, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+



https://bitbucket.org/yt_analysis/yt-3.0/changeset/0ff74ce4cb81/
changeset:   0ff74ce4cb81
branch:      yt
user:        MatthewTurk
date:        2012-10-23 20:50:16
summary:     Removing items from the answer testing __init__.py.  This may fix the issues
with Shining Panda.
affected #:  2 files

diff -r 58b6bb1c8283318125e9691a93d6b1ac6b715e2d -r 0ff74ce4cb81b2c7351b4313d0062fda89c99956 yt/utilities/answer_testing/__init__.py
--- a/yt/utilities/answer_testing/__init__.py
+++ b/yt/utilities/answer_testing/__init__.py
@@ -22,10 +22,3 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
-import runner
-import output_tests
-from runner import RegressionTestRunner
-
-from output_tests import RegressionTest, SingleOutputTest, \
-    MultipleOutputTest, YTStaticOutputTest, create_test


diff -r 58b6bb1c8283318125e9691a93d6b1ac6b715e2d -r 0ff74ce4cb81b2c7351b4313d0062fda89c99956 yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -57,8 +57,3 @@
     TestBooleanANDParticleQuantity, \
     TestBooleanORParticleQuantity, \
     TestBooleanNOTParticleQuantity
-
-try:
-    from .framework import AnswerTesting
-except ImportError:
-    raise



https://bitbucket.org/yt_analysis/yt-3.0/changeset/4bd73932fca9/
changeset:   4bd73932fca9
branch:      yt
user:        MatthewTurk
date:        2012-10-23 21:18:17
summary:     Moving the answer testing plugin back where it came from, to avoid the weird
imports.  I believe I now have a solution to running nosetests on SP.
affected #:  4 files

diff -r 0ff74ce4cb81b2c7351b4313d0062fda89c99956 -r 4bd73932fca9eacd0a9876f8970e10e06803cb28 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -7,5 +7,3 @@
 where=yt
 exclude=answer_testing
 with-xunit=1
-with-answer-testing=1
-answer-compare=gold001


diff -r 0ff74ce4cb81b2c7351b4313d0062fda89c99956 -r 4bd73932fca9eacd0a9876f8970e10e06803cb28 setup.py
--- a/setup.py
+++ b/setup.py
@@ -156,7 +156,7 @@
                             'yt = yt.utilities.command_line:run_main',
                       ],
                       'nose.plugins.0.10': [
-                            'answer-testing = yt.utilities.answer_testing.api:AnswerTesting'
+                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
                       ]
         },
         author="Matthew J. Turk",
@@ -170,6 +170,4 @@
     return
 
 if __name__ == '__main__':
-    if "nosetests" in sys.argv: 
-        from yt.utilities.answer_testing.plugin import AnswerTesting
     setup_package()


diff -r 0ff74ce4cb81b2c7351b4313d0062fda89c99956 -r 4bd73932fca9eacd0a9876f8970e10e06803cb28 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -28,18 +28,81 @@
 import hashlib
 import contextlib
 import urllib2
+import cPickle
 
-from .plugin import AnswerTesting, run_big_data
+from nose.plugins import Plugin
 from yt.testing import *
 from yt.config import ytcfg
 from yt.mods import *
 import cPickle
 
+from yt.utilities.logger import disable_stream_logging
+from yt.utilities.command_line import get_yt_version
+
 mylog = logging.getLogger('nose.plugins.answer-testing')
+run_big_data = False
 
-_latest = "SomeValue"
+_latest = "gold001"
 _url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
 
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=_latest, help="The name against which we will compare")
+        parser.add_option("--answer-big-data", dest="big_data",
+            default=False, help="Should we run against big data, too?",
+            action="store_true")
+        parser.add_option("--answer-name", dest="this_name",
+            default=None,
+            help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+        disable_stream_logging()
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        if options.this_name is None: options.this_name = my_hash
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
+        if options.compare_name is not None:
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
+        global run_big_data
+        run_big_data = options.big_data
+
+    def finalize(self, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
 class AnswerTestOpener(object):
     def __init__(self, reference_name):
         self.reference_name = reference_name


diff -r 0ff74ce4cb81b2c7351b4313d0062fda89c99956 -r 4bd73932fca9eacd0a9876f8970e10e06803cb28 yt/utilities/answer_testing/plugin.py
--- a/yt/utilities/answer_testing/plugin.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""
-Answer Testing using Nose as a starting point
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-import logging
-import os
-import time
-import hashlib
-import contextlib
-import urllib2
-import cPickle
-from collections import defaultdict
-from nose.plugins import Plugin
-
-run_big_data = False
-
-class AnswerTesting(Plugin):
-    name = "answer-testing"
-
-    def options(self, parser, env=os.environ):
-        super(AnswerTesting, self).options(parser, env=env)
-        parser.add_option("--answer-compare", dest="compare_name",
-            default=None, help="The name against which we will compare")
-        parser.add_option("--answer-big-data", dest="big_data",
-            default=False, help="Should we run against big data, too?",
-            action="store_true")
-        parser.add_option("--answer-name", dest="this_name",
-            default=None,
-            help="The name we'll call this set of tests")
-        parser.add_option("--answer-store", dest="store_results",
-            default=False, action="store_true")
-
-    def configure(self, options, conf):
-        super(AnswerTesting, self).configure(options, conf)
-        if not self.enabled:
-            return
-        from yt.utilities.logger import disable_stream_logging
-        disable_stream_logging()
-        from yt.utilities.command_line import get_yt_version
-        try:
-            my_hash = get_yt_version()
-        except:
-            my_hash = "UNKNOWN%s" % (time.time())
-        if options.this_name is None: options.this_name = my_hash
-        from yt.config import ytcfg
-        ytcfg["yt","__withintesting"] = "True"
-        from .framework import AnswerTestingTest, AnswerTestOpener
-        AnswerTestingTest.result_storage = \
-            self.result_storage = defaultdict(dict)
-        if options.compare_name is not None:
-            # Now we grab from our S3 store
-            if options.compare_name == "latest":
-                options.compare_name = _latest
-            AnswerTestingTest.reference_storage = \
-                AnswerTestOpener(options.compare_name)
-        self.answer_name = options.this_name
-        self.store_results = options.store_results
-        global run_big_data
-        run_big_data = options.big_data
-
-    def finalize(self, result):
-        # This is where we dump our result storage up to Amazon, if we are able
-        # to.
-        if self.store_results is False: return
-        import boto
-        from boto.s3.key import Key
-        c = boto.connect_s3()
-        bucket = c.get_bucket("yt-answer-tests")
-        for pf_name in self.result_storage:
-            rs = cPickle.dumps(self.result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
-            if tk is not None: tk.delete()
-            k = Key(bucket)
-            k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
-            k.set_acl("public-read")
-



https://bitbucket.org/yt_analysis/yt-3.0/changeset/a70531b61894/
changeset:   a70531b61894
branch:      yt
user:        MatthewTurk
date:        2012-10-23 23:39:04
summary:     Adding checks to assert_rel_equal for NaNs.  These may show up if you (for
instance) project something that's weighted with zero values.
affected #:  1 file

diff -r 4bd73932fca9eacd0a9876f8970e10e06803cb28 -r a70531b618946c8018b3954ac87f5df49b36f4b0 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -29,6 +29,15 @@
     assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
 
 def assert_rel_equal(a1, a2, decimals):
+    # We have nan checks in here because occasionally we have fields that get
+    # weighted without non-zero weights.  I'm looking at you, particle fields!
+    if isinstance(a1, np.ndarray):
+        assert(a1.size == a2.size)
+        # Mask out NaNs
+        a1[np.isnan(a1)] = 1.0
+        a2[np.isnan(a2)] = 1.0
+    elif np.isnan(a1) and np.isnan(a2):
+        return True
     return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):
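
With this change, NaNs appearing at the same positions on both sides are
masked to 1.0 before the ratio is taken (note that the masking mutates the
input arrays in place), and two scalar NaNs compare equal outright. A small
illustration of the resulting behavior:

    import numpy as np
    from yt.testing import assert_rel_equal

    a1 = np.array([1.0, np.nan, 3.0])
    a2 = np.array([1.0, np.nan, 3.0 + 1e-12])

    # Passes: the matching NaNs are masked out rather than propagating
    # through a1/a2 and failing the comparison.
    assert_rel_equal(a1, a2, 8)

    # Scalars: two NaNs are treated as equal.
    assert_rel_equal(np.nan, np.nan, 8)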



https://bitbucket.org/yt_analysis/yt-3.0/changeset/dd8b7e82f5ee/
changeset:   dd8b7e82f5ee
branch:      yt
user:        ngoldbaum
date:        2012-10-24 00:53:32
summary:     Merged in MatthewTurk/yt (pull request #308)
affected #:  11 files



diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 setup.py
--- a/setup.py
+++ b/setup.py
@@ -154,7 +154,11 @@
             'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
                             'yt = yt.utilities.command_line:run_main',
-                       ]},
+                      ],
+                      'nose.plugins.0.10': [
+                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+                      ]
+        },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
         url="http://yt-project.org/",


diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -54,6 +54,7 @@
     pasteboard_repo = '',
     reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
+    test_data_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',


diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 yt/frontends/enzo/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -0,0 +1,51 @@
+"""
+Enzo frontend tests using moving7
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load
+from yt.frontends.enzo.api import EnzoStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
+           "particle_density")
+
+m7 = "DD0010/moving7_0010"
+@requires_pf(m7)
+def test_moving7():
+    pf = data_dir_load(m7)
+    yield assert_equal, str(pf), "moving7_0010"
+    for test in small_patch_amr(m7, _fields):
+        yield test
+
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+@requires_pf(g30, big_data=True)
+def test_galaxy0030():
+    pf = data_dir_load(g30)
+    yield assert_equal, str(pf), "galaxy0030"
+    for test in big_patch_amr(g30, _fields):
+        yield test


diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -29,6 +29,15 @@
     assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
 
 def assert_rel_equal(a1, a2, decimals):
+    # We have nan checks in here because occasionally we have fields that get
+    # weighted without non-zero weights.  I'm looking at you, particle fields!
+    if isinstance(a1, np.ndarray):
+        assert(a1.size == a2.size)
+        # Mask out NaNs
+        a1[np.isnan(a1)] = 1.0
+        a2[np.isnan(a2)] = 1.0
+    elif np.isnan(a1) and np.isnan(a2):
+        return True
     return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):


diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 yt/utilities/answer_testing/__init__.py
--- a/yt/utilities/answer_testing/__init__.py
+++ b/yt/utilities/answer_testing/__init__.py
@@ -22,10 +22,3 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
-import runner
-import output_tests
-from runner import RegressionTestRunner
-
-from output_tests import RegressionTest, SingleOutputTest, \
-    MultipleOutputTest, YTStaticOutputTest, create_test




diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 yt/utilities/answer_testing/default_tests.py
--- a/yt/utilities/answer_testing/default_tests.py
+++ b/yt/utilities/answer_testing/default_tests.py
@@ -67,3 +67,4 @@
         for field in sorted(self.result):
             for p1, p2 in zip(self.result[field], old_result[field]):
                 self.compare_data_arrays(p1, p2, self.tolerance)
+


diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 yt/utilities/answer_testing/framework.py
--- /dev/null
+++ b/yt/utilities/answer_testing/framework.py
@@ -0,0 +1,396 @@
+"""
+Answer Testing using Nose as a starting point
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import logging
+import os
+import hashlib
+import contextlib
+import urllib2
+import cPickle
+
+from nose.plugins import Plugin
+from yt.testing import *
+from yt.config import ytcfg
+from yt.mods import *
+import cPickle
+
+from yt.utilities.logger import disable_stream_logging
+from yt.utilities.command_line import get_yt_version
+
+mylog = logging.getLogger('nose.plugins.answer-testing')
+run_big_data = False
+
+_latest = "gold001"
+_url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
+
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=_latest, help="The name against which we will compare")
+        parser.add_option("--answer-big-data", dest="big_data",
+            default=False, help="Should we run against big data, too?",
+            action="store_true")
+        parser.add_option("--answer-name", dest="this_name",
+            default=None,
+            help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+        disable_stream_logging()
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        if options.this_name is None: options.this_name = my_hash
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
+        if options.compare_name is not None:
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
+        global run_big_data
+        run_big_data = options.big_data
+
+    def finalize(self, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestOpener(object):
+    def __init__(self, reference_name):
+        self.reference_name = reference_name
+        self.cache = {}
+
+    def get(self, pf_name, default = None):
+        if pf_name in self.cache: return self.cache[pf_name]
+        url = _url_path % (self.reference_name, pf_name)
+        try:
+            resp = urllib2.urlopen(url)
+            # This is dangerous, but we have a controlled S3 environment
+            data = resp.read()
+            rv = cPickle.loads(data)
+        except urllib2.HTTPError as ex:
+            raise YTNoOldAnswer(url)
+            mylog.warning("Missing %s (%s)", url, ex)
+            rv = default
+        self.cache[pf_name] = rv
+        return rv
+
+@contextlib.contextmanager
+def temp_cwd(cwd):
+    oldcwd = os.getcwd()
+    os.chdir(cwd)
+    yield
+    os.chdir(oldcwd)
+
+def can_run_pf(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        try:
+            load(pf_fn)
+        except:
+            return False
+    return AnswerTestingTest.result_storage is not None
+
+def data_dir_load(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        pf = load(pf_fn)
+        pf.h
+        return pf
+
+class AnswerTestingTest(object):
+    reference_storage = None
+    def __init__(self, pf_fn):
+        self.pf = data_dir_load(pf_fn)
+
+    def __call__(self):
+        nv = self.run()
+        if self.reference_storage is not None:
+            dd = self.reference_storage.get(str(self.pf))
+            if dd is None: raise YTNoOldAnswer()
+            ov = dd[self.description]
+            self.compare(nv, ov)
+        else:
+            ov = None
+        self.result_storage[str(self.pf)][self.description] = nv
+
+    def compare(self, new_result, old_result):
+        raise RuntimeError
+
+    def create_obj(self, pf, obj_type):
+        # obj_type should be tuple of
+        #  ( obj_name, ( args ) )
+        if obj_type is None:
+            return pf.h.all_data()
+        cls = getattr(pf.h, obj_type[0])
+        obj = cls(*obj_type[1])
+        return obj
+
+    @property
+    def sim_center(self):
+        """
+        This returns the center of the domain.
+        """
+        return 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+
+    @property
+    def max_dens_location(self):
+        """
+        This is a helper function to return the location of the most dense
+        point.
+        """
+        return self.pf.h.find_max("Density")[1]
+
+    @property
+    def entire_simulation(self):
+        """
+        Return an unsorted array of values that cover the entire domain.
+        """
+        return self.pf.h.all_data()
+
+    @property
+    def description(self):
+        obj_type = getattr(self, "obj_type", None)
+        if obj_type is None:
+            oname = "all"
+        else:
+            oname = "_".join((str(s) for s in obj_type))
+        args = [self._type_name, str(self.pf), oname]
+        args += [str(getattr(self, an)) for an in self._attrs]
+        return "_".join(args)
+        
+class FieldValuesTest(AnswerTestingTest):
+    _type_name = "FieldValues"
+    _attrs = ("field", )
+
+    def __init__(self, pf_fn, field, obj_type = None):
+        super(FieldValuesTest, self).__init__(pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        avg = obj.quantities["WeightedAverageQuantity"](self.field,
+                             weight="Ones")
+        (mi, ma), = obj.quantities["Extrema"](self.field)
+        return np.array([avg, mi, ma])
+
+    def compare(self, new_result, old_result):
+        assert_equal(new_result, old_result)
+
+class ProjectionValuesTest(AnswerTestingTest):
+    _type_name = "ProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(ProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        return proj.field_data
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class PixelizedProjectionValuesTest(AnswerTestingTest):
+    _type_name = "PixelizedProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(PixelizedProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        frb = proj.to_frb((1.0, 'unitary'), 256)
+        frb[self.field]
+        if self.weight_field is not None:
+            frb[self.weight_field]
+        d = frb.data
+        d.update( dict( (("%s_sum" % f, proj[f].sum(dtype="float64"))
+                         for f in proj.field_data.keys()) ) )
+        return d
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_rel_equal(new_result[k], old_result[k], 10)
+
+class GridValuesTest(AnswerTestingTest):
+    _type_name = "GridValues"
+    _attrs = ("field",)
+
+    def __init__(self, pf_fn, field):
+        super(GridValuesTest, self).__init__(pf_fn)
+        self.field = field
+
+    def run(self):
+        hashes = {}
+        for g in self.pf.h.grids:
+            hashes[g.id] = hashlib.md5(g[self.field].tostring()).hexdigest()
+            g.clear_data()
+        return hashes
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class GridHierarchyTest(AnswerTestingTest):
+    _type_name = "GridHierarchy"
+    _attrs = ()
+
+    def run(self):
+        result = {}
+        result["grid_dimensions"] = self.pf.h.grid_dimensions
+        result["grid_left_edges"] = self.pf.h.grid_left_edge
+        result["grid_right_edges"] = self.pf.h.grid_right_edge
+        result["grid_levels"] = self.pf.h.grid_levels
+        result["grid_particle_count"] = self.pf.h.grid_particle_count
+        return result
+
+    def compare(self, new_result, old_result):
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class ParentageRelationshipsTest(AnswerTestingTest):
+    _type_name = "ParentageRelationships"
+    _attrs = ()
+    def run(self):
+        result = {}
+        result["parents"] = []
+        result["children"] = []
+        for g in self.pf.h.grids:
+            p = g.Parent
+            if p is None:
+                result["parents"].append(None)
+            elif hasattr(p, "id"):
+                result["parents"].append(p.id)
+            else:
+                result["parents"].append([pg.id for pg in p])
+            result["children"].append([c.id for c in g.Children])
+        return result
+
+    def compare(self, new_result, old_result):
+        for newp, oldp in zip(new_result["parents"], old_result["parents"]):
+            assert(newp == oldp)
+        for newc, oldc in zip(new_result["children"], old_result["children"]):
+            assert(newc == oldc)
+
+def requires_pf(pf_fn, big_data = False):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    if big_data and not run_big_data:
+        return ffalse
+    elif not can_run_pf(pf_fn):
+        return ffalse
+    else:
+        return ftrue
+
+def small_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+
+def big_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
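
As a usage sketch (not part of the changeset), a frontend test module consumes these generators the way the Enzo tests later in this series do; the dataset path and field names below are placeholders:

    from yt.testing import *
    from yt.utilities.answer_testing.framework import \
        requires_pf, small_patch_amr, data_dir_load

    _fields = ("Temperature", "Density")

    my_pf = "MySim/my_output_0010"  # placeholder dataset path

    @requires_pf(my_pf)
    def test_my_output():
        pf = data_dir_load(my_pf)
        yield assert_equal, str(pf), "my_output_0010"
        for test in small_patch_amr(my_pf, _fields):
            yield test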


diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -29,14 +29,12 @@
 # We first create our dictionary of tests to run.  This starts out empty, and
 # as tests are imported it will be filled.
 if "TestRegistry" not in locals():
-    print "Initializing TestRegistry"
     class TestRegistry(dict):
         def __new__(cls, *p, **k):
             if not '_the_instance' in cls.__dict__:
                 cls._the_instance = dict.__new__(cls)
                 return cls._the_instance
 if "test_registry" not in locals():
-    print "Initializing test_registry"
     test_registry = TestRegistry()
 
 # The exceptions we raise, related to the character of the failure.


diff -r 82c77ccccc39ddac6323e12d9c3c21788762339a -r dd8b7e82f5ee0749e131a99665f68d9980024b63 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -154,3 +154,11 @@
     def __str__(self):
         return "No filenames were found to match the pattern: " + \
                "'%s'" % (self.pattern)
+
+class YTNoOldAnswer(YTException):
+    def __init__(self, path):
+        self.path = path
+
+    def __str__(self):
+        return "There is no old answer available.\n" + \
+               str(self.path)
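
A minimal sketch of how this exception is meant to surface during a comparison run; the helper name and path format here are illustrative, the real lookup lives in AnswerTestingTest.__call__:

    from yt.utilities.exceptions import YTNoOldAnswer

    def fetch_old_answer(reference_storage, pf_name, description):
        # the storage returns None when no answer was ever recorded
        dd = reference_storage.get(pf_name)
        if dd is None:
            raise YTNoOldAnswer("%s/%s" % (pf_name, description))
        return dd[description]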



https://bitbucket.org/yt_analysis/yt-3.0/changeset/329544ece9b1/
changeset:   329544ece9b1
branch:      yt
user:        MatthewTurk
date:        2012-10-24 01:13:24
summary:     Turning off _is_fully_enclosed for spheres
affected #:  1 file

diff -r dd8b7e82f5ee0749e131a99665f68d9980024b63 -r 329544ece9b19b441f2491e7dad059c848e9e318 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3502,10 +3502,7 @@
         for gi, g in enumerate(grids): self._grids[gi] = g
 
     def _is_fully_enclosed(self, grid):
-        r = np.abs(grid._corners - self.center)
-        r = np.minimum(r, np.abs(self.DW[None,:]-r))
-        corner_radius = np.sqrt((r**2.0).sum(axis=1))
-        return np.all(corner_radius <= self.radius)
+        return False
 
     @restore_grid_state # Pains me not to decorate with cache_mask here
     def _get_cut_mask(self, grid, field=None):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9003c2843a1d/
changeset:   9003c2843a1d
branch:      yt-3.0
user:        MatthewTurk
date:        2012-10-24 02:24:26
summary:     Merging from tip of development branch, including answer testing
affected #:  10 files



diff -r 3a46c354047f5c57101cf1ed6b5b7e6719a5c4a2 -r 9003c2843a1d9358959d44edaf74b61866a043ff setup.py
--- a/setup.py
+++ b/setup.py
@@ -156,7 +156,7 @@
                             'yt = yt.utilities.command_line:run_main',
                       ],
                       'nose.plugins.0.10': [
-                            'answer-testing = yt.utilities.answer_testing.api:AnswerTesting'
+                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
                       ]
         },
         author="Matthew J. Turk",




diff -r 3a46c354047f5c57101cf1ed6b5b7e6719a5c4a2 -r 9003c2843a1d9358959d44edaf74b61866a043ff yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -26,7 +26,8 @@
 from yt.testing import *
 from yt.utilities.answer_testing.framework import \
     requires_pf, \
-    standard_patch_amr, \
+    small_patch_amr, \
+    big_patch_amr, \
     data_dir_load
 from yt.frontends.enzo.api import EnzoStaticOutput
 
@@ -38,14 +39,13 @@
 def test_moving7():
     pf = data_dir_load(m7)
     yield assert_equal, str(pf), "moving7_0010"
-    for test in standard_patch_amr(m7, _fields):
+    for test in small_patch_amr(m7, _fields):
         yield test
 
 g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
-@requires_pf(g30)
+@requires_pf(g30, big_data=True)
 def test_galaxy0030():
-    return
     pf = data_dir_load(g30)
     yield assert_equal, str(pf), "galaxy0030"
-    for test in standard_patch_amr(g30, _fields):
+    for test in big_patch_amr(g30, _fields):
         yield test


diff -r 3a46c354047f5c57101cf1ed6b5b7e6719a5c4a2 -r 9003c2843a1d9358959d44edaf74b61866a043ff yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -29,6 +29,15 @@
     assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
 
 def assert_rel_equal(a1, a2, decimals):
+    # We have nan checks in here because occasionally we have fields that get
+    # weighted without non-zero weights.  I'm looking at you, particle fields!
+    if isinstance(a1, np.ndarray):
+        assert(a1.size == a2.size)
+        # Mask out NaNs
+        a1[np.isnan(a1)] = 1.0
+        a2[np.isnan(a2)] = 1.0
+    elif np.isnan(a1) and np.isnan(a2):
+        return True
     return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):
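
A quick sketch of the new NaN behavior: arrays with NaNs in matching slots now compare equal instead of propagating NaN through the ratio test.

    from yt.testing import *

    a = np.array([1.0, np.nan, 3.0])
    b = np.array([1.0, np.nan, 3.0])
    # NaN entries are masked to 1.0 on both sides before the ratio test
    assert_rel_equal(a, b, 12)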


diff -r 3a46c354047f5c57101cf1ed6b5b7e6719a5c4a2 -r 9003c2843a1d9358959d44edaf74b61866a043ff yt/utilities/answer_testing/__init__.py
--- a/yt/utilities/answer_testing/__init__.py
+++ b/yt/utilities/answer_testing/__init__.py
@@ -22,10 +22,3 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
-import runner
-import output_tests
-from runner import RegressionTestRunner
-
-from output_tests import RegressionTest, SingleOutputTest, \
-    MultipleOutputTest, YTStaticOutputTest, create_test




diff -r 3a46c354047f5c57101cf1ed6b5b7e6719a5c4a2 -r 9003c2843a1d9358959d44edaf74b61866a043ff yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -28,21 +28,81 @@
 import hashlib
 import contextlib
 import urllib2
+import cPickle
 
+from nose.plugins import Plugin
 from yt.testing import *
-from yt.utilities.command_line import get_yt_version
 from yt.config import ytcfg
-from yt.utilities.logger import \
-    disable_stream_logging
-from nose.plugins import Plugin
 from yt.mods import *
 import cPickle
 
+from yt.utilities.logger import disable_stream_logging
+from yt.utilities.command_line import get_yt_version
+
 mylog = logging.getLogger('nose.plugins.answer-testing')
+run_big_data = False
 
-_latest = "SomeValue"
+_latest = "gold001"
 _url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
 
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=_latest, help="The name against which we will compare")
+        parser.add_option("--answer-big-data", dest="big_data",
+            default=False, help="Should we run against big data, too?",
+            action="store_true")
+        parser.add_option("--answer-name", dest="this_name",
+            default=None,
+            help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+        disable_stream_logging()
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        if options.this_name is None: options.this_name = my_hash
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
+        if options.compare_name is not None:
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
+        global run_big_data
+        run_big_data = options.big_data
+
+    def finalize(self, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
 class AnswerTestOpener(object):
     def __init__(self, reference_name):
         self.reference_name = reference_name
@@ -63,58 +123,6 @@
         self.cache[pf_name] = rv
         return rv
 
-class AnswerTesting(Plugin):
-    name = "answer-testing"
-
-    def options(self, parser, env=os.environ):
-        super(AnswerTesting, self).options(parser, env=env)
-        try:
-            my_hash = get_yt_version()
-        except:
-            my_hash = "UNKNOWN%s" % (time.time())
-        parser.add_option("--answer-compare", dest="compare_name",
-            default=None, help="The name against which we will compare")
-        parser.add_option("--answer-name", dest="this_name",
-            default=my_hash,
-            help="The name we'll call this set of tests")
-        parser.add_option("--answer-store", dest="store_results",
-            default=False, action="store_true")
-
-    def configure(self, options, conf):
-        super(AnswerTesting, self).configure(options, conf)
-        if not self.enabled:
-            return
-        disable_stream_logging()
-        from yt.config import ytcfg
-        ytcfg["yt","__withintesting"] = "True"
-        AnswerTestingTest.result_storage = \
-            self.result_storage = defaultdict(dict)
-        if options.compare_name is not None:
-            # Now we grab from our S3 store
-            if options.compare_name == "latest":
-                options.compare_name = _latest
-            AnswerTestingTest.reference_storage = \
-                AnswerTestOpener(options.compare_name)
-        self.answer_name = options.this_name
-        self.store_results = options.store_results
-
-    def finalize(self, result):
-        # This is where we dump our result storage up to Amazon, if we are able
-        # to.
-        if self.store_results is False: return
-        import boto
-        from boto.s3.key import Key
-        c = boto.connect_s3()
-        bucket = c.get_bucket("yt-answer-tests")
-        for pf_name in self.result_storage:
-            rs = cPickle.dumps(self.result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
-            if tk is not None: tk.delete()
-            k = Key(bucket)
-            k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
-            k.set_acl("public-read")
-
 @contextlib.contextmanager
 def temp_cwd(cwd):
     oldcwd = os.getcwd()
@@ -140,8 +148,6 @@
 
 class AnswerTestingTest(object):
     reference_storage = None
-    result_storage = None
-    description = None
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 
@@ -150,11 +156,11 @@
         if self.reference_storage is not None:
             dd = self.reference_storage.get(str(self.pf))
             if dd is None: raise YTNoOldAnswer()
-            ov = dd[self.name]
+            ov = dd[self.description]
             self.compare(nv, ov)
         else:
             ov = None
-        self.result_storage[str(self.pf)][self.name] = nv
+        self.result_storage[str(self.pf)][self.description] = nv
 
     def compare(self, new_result, old_result):
         raise RuntimeError
@@ -191,7 +197,7 @@
         return self.pf.h.all_data()
 
     @property
-    def name(self):
+    def description(self):
         obj_type = getattr(self, "obj_type", None)
         if obj_type is None:
             oname = "all"
@@ -212,7 +218,10 @@
 
     def run(self):
         obj = self.create_obj(self.pf, self.obj_type)
-        return obj[self.field]
+        avg = obj.quantities["WeightedAverageQuantity"](self.field,
+                             weight="Ones")
+        (mi, ma), = obj.quantities["Extrema"](self.field)
+        return np.array([avg, mi, ma])
 
     def compare(self, new_result, old_result):
         assert_equal(new_result, old_result)
@@ -234,7 +243,7 @@
             obj = self.create_obj(self.pf, self.obj_type)
         else:
             obj = None
-        proj = self.pf.h.proj(self.field, self.axis,
+        proj = self.pf.h.proj(self.field, self.axis, 
                               weight_field=self.weight_field,
                               data_source = obj)
         return proj.field_data
@@ -246,6 +255,41 @@
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
+class PixelizedProjectionValuesTest(AnswerTestingTest):
+    _type_name = "PixelizedProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(PixelizedProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.field, self.axis, 
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        frb = proj.to_frb((1.0, 'unitary'), 256)
+        frb[self.field]
+        frb[self.weight_field]
+        d = frb.data
+        d.update( dict( (("%s_sum" % f, proj[f].sum(dtype="float64"))
+                         for f in proj.field_data.keys()) ) )
+        return d
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_rel_equal(new_result[k], old_result[k], 10)
+
 class GridValuesTest(AnswerTestingTest):
     _type_name = "GridValues"
     _attrs = ("field",)
@@ -309,17 +353,19 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newc == oldc)
 
-def requires_pf(pf_fn):
+def requires_pf(pf_fn, big_data = False):
     def ffalse(func):
         return lambda: None
     def ftrue(func):
         return func
-    if not can_run_pf(pf_fn):
+    if big_data and not run_big_data:
+        return ffalse
+    elif not can_run_pf(pf_fn):
         return ffalse
     else:
         return ftrue
 
-def standard_patch_amr(pf_fn, fields):
+def small_patch_amr(pf_fn, fields):
     if not can_run_pf(pf_fn): return
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
     yield GridHierarchyTest(pf_fn)
@@ -334,3 +380,17 @@
                         ds)
                 yield FieldValuesTest(
                         pf_fn, field, ds)
+
+def big_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
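
These options suggest invocations along the following lines (a sketch assuming nose's standard --with-<plugin> enabling switch; the module path is illustrative):

    # Store a new set of answers under an explicit name:
    nosetests --with-answer-testing --answer-name=mytest --answer-store yt.frontends.enzo

    # Compare against the gold standard, including big-data tests:
    nosetests --with-answer-testing --answer-compare=gold001 --answer-big-data yt.frontends.enzo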




diff -r 3a46c354047f5c57101cf1ed6b5b7e6719a5c4a2 -r 9003c2843a1d9358959d44edaf74b61866a043ff yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -214,8 +214,8 @@
 class ContourCallback(PlotCallback):
     _type_name = "contour"
     def __init__(self, field, ncont=5, factor=4, clim=None,
-                 plot_args = None):
-        """ 
+                 plot_args = None, label = False, label_args = None):
+        """
         annotate_contour(self, field, ncont=5, factor=4, take_log=False, clim=None,
                          plot_args = None):
 
@@ -233,6 +233,10 @@
         self.clim = clim
         if plot_args is None: plot_args = {'colors':'k'}
         self.plot_args = plot_args
+        self.label = label
+        if label_args is None:
+            label_args = {}
+        self.label_args = label_args
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -293,10 +297,14 @@
         if self.clim is not None: 
             self.ncont = np.linspace(self.clim[0], self.clim[1], self.ncont)
         
-        plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
+        cset = plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
+        
+        if self.label:
+            plot._axes.clabel(cset, **self.label_args)
+        
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
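
A usage sketch of the new contour labeling (the dataset path is a placeholder; label_args is forwarded verbatim to matplotlib's clabel):

    from yt.mods import *

    pf = load("MySim/my_output_0010")  # placeholder dataset
    p = SlicePlot(pf, "x", "Density")
    # label=True draws inline contour labels via clabel()
    p.annotate_contour("Temperature", ncont=4, label=True,
                       label_args={"fontsize": 8})
    p.save()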



https://bitbucket.org/yt_analysis/yt-3.0/changeset/221b5163bffa/
changeset:   221b5163bffa
branch:      yt
user:        MatthewTurk
date:        2012-10-24 02:42:02
summary:     Adding more tests to the ray and ortho ray tests and moving to yield instead of
return
affected #:  2 files

diff -r 329544ece9b19b441f2491e7dad059c848e9e318 -r 221b5163bffa2e478a2b65d80090adf1be45bb2e yt/data_objects/tests/test_ortho_rays.py
--- a/yt/data_objects/tests/test_ortho_rays.py
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -21,5 +21,5 @@
                    (np.abs(my_all[axes[my_axes[1]]] - ocoord[1]) <= 
                     0.5 * dx[my_axes[1]])
 
-        assert_equal(my_oray['Density'].sum(),
-                     my_all['Density'][my_cells].sum())
+        yield assert_equal, my_oray['Density'].sum(), \
+                            my_all['Density'][my_cells].sum()


diff -r 329544ece9b19b441f2491e7dad059c848e9e318 -r 221b5163bffa2e478a2b65d80090adf1be45bb2e yt/data_objects/tests/test_rays.py
--- a/yt/data_objects/tests/test_rays.py
+++ b/yt/data_objects/tests/test_rays.py
@@ -1,31 +1,33 @@
 from yt.testing import *
 
 def test_ray():
-    pf = fake_random_pf(64, nprocs=8)
-    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
-      pf.domain_dimensions
+    for nproc in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=nproc)
+        dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
 
-    p1 = np.random.random(3)
-    p2 = np.random.random(3)
+        p1 = np.random.random(3)
+        p2 = np.random.random(3)
 
-    my_ray = pf.h.ray(p1, p2)
-    assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
-    ray_cells = my_ray['dts'] > 0
+        my_ray = pf.h.ray(p1, p2)
+        assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
+        ray_cells = my_ray['dts'] > 0
 
-    # find cells intersected by the ray
-    my_all = pf.h.all_data()
-    
-    dt = np.abs(dx / (p2 - p1))
-    tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
-                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
-                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
-    tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
-                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
-                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
-    tin = tin.max(axis=0)
-    tout = tout.min(axis=0)
-    my_cells = (tin < tout) & (tin < 1) & (tout > 0)
+        # find cells intersected by the ray
+        my_all = pf.h.all_data()
+        
+        dt = np.abs(dx / (p2 - p1))
+        tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
+                               [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
+                               [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
+        tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
+                               [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
+                               [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
+        tin = tin.max(axis=0)
+        tout = tout.min(axis=0)
+        my_cells = (tin < tout) & (tin < 1) & (tout > 0)
 
-    assert_rel_equal(ray_cells.sum(), my_cells.sum(), 14)
-    assert_rel_equal(my_ray['Density'][ray_cells].sum(),
-                     my_all['Density'][my_cells].sum(), 14)
+        yield assert_rel_equal, ray_cells.sum(), my_cells.sum(), 14
+        yield assert_rel_equal, my_ray['Density'][ray_cells].sum(), \
+                                my_all['Density'][my_cells].sum(), 14
+        yield assert_rel_equal, my_ray['dts'].sum(), 1.0, 14
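
For reference, nose treats each yielded (callable, *args) tuple as its own test case, so every comparison is now reported individually; a self-contained sketch:

    from yt.testing import *

    def test_yield_style():
        a = np.arange(8)
        # each yielded tuple becomes an individual nose test
        yield assert_equal, a.sum(), 28
        yield assert_rel_equal, a.sum() / 28.0, 1.0, 14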



https://bitbucket.org/yt_analysis/yt-3.0/changeset/aedb8fa23cfd/
changeset:   aedb8fa23cfd
branch:      yt-3.0
user:        MatthewTurk
date:        2012-10-24 02:42:45
summary:     Merging to get the ray tests
affected #:  2 files

diff -r 9003c2843a1d9358959d44edaf74b61866a043ff -r aedb8fa23cfd362ae8e0e7770fa584b406379a0a yt/data_objects/tests/test_ortho_rays.py
--- a/yt/data_objects/tests/test_ortho_rays.py
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -21,5 +21,5 @@
                    (np.abs(my_all[axes[my_axes[1]]] - ocoord[1]) <= 
                     0.5 * dx[my_axes[1]])
 
-        assert_equal(my_oray['Density'].sum(),
-                     my_all['Density'][my_cells].sum())
+        yield assert_equal, my_oray['Density'].sum(), \
+                            my_all['Density'][my_cells].sum()


diff -r 9003c2843a1d9358959d44edaf74b61866a043ff -r aedb8fa23cfd362ae8e0e7770fa584b406379a0a yt/data_objects/tests/test_rays.py
--- a/yt/data_objects/tests/test_rays.py
+++ b/yt/data_objects/tests/test_rays.py
@@ -1,31 +1,33 @@
 from yt.testing import *
 
 def test_ray():
-    pf = fake_random_pf(64, nprocs=8)
-    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
-      pf.domain_dimensions
+    for nproc in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=nproc)
+        dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
 
-    p1 = np.random.random(3)
-    p2 = np.random.random(3)
+        p1 = np.random.random(3)
+        p2 = np.random.random(3)
 
-    my_ray = pf.h.ray(p1, p2)
-    assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
-    ray_cells = my_ray['dts'] > 0
+        my_ray = pf.h.ray(p1, p2)
+        assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
+        ray_cells = my_ray['dts'] > 0
 
-    # find cells intersected by the ray
-    my_all = pf.h.all_data()
-    
-    dt = np.abs(dx / (p2 - p1))
-    tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
-                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
-                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
-    tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
-                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
-                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
-    tin = tin.max(axis=0)
-    tout = tout.min(axis=0)
-    my_cells = (tin < tout) & (tin < 1) & (tout > 0)
+        # find cells intersected by the ray
+        my_all = pf.h.all_data()
+        
+        dt = np.abs(dx / (p2 - p1))
+        tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
+                               [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
+                               [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
+        tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
+                               [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
+                               [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
+        tin = tin.max(axis=0)
+        tout = tout.min(axis=0)
+        my_cells = (tin < tout) & (tin < 1) & (tout > 0)
 
-    assert_rel_equal(ray_cells.sum(), my_cells.sum(), 14)
-    assert_rel_equal(my_ray['Density'][ray_cells].sum(),
-                     my_all['Density'][my_cells].sum(), 14)
+        yield assert_rel_equal, ray_cells.sum(), my_cells.sum(), 14
+        yield assert_rel_equal, my_ray['Density'][ray_cells].sum(), \
+                                my_all['Density'][my_cells].sum(), 14
+        yield assert_rel_equal, my_ray['dts'].sum(), 1.0, 14



https://bitbucket.org/yt_analysis/yt-3.0/changeset/e944f53ca470/
changeset:   e944f53ca470
branch:      yt-3.0
user:        MatthewTurk
date:        2012-10-24 03:31:28
summary:     This tests chunking of data for a bunch of different data objects
affected #:  1 file

diff -r aedb8fa23cfd362ae8e0e7770fa584b406379a0a -r e944f53ca470cfde924a247c15e86d2eafd3465a yt/data_objects/tests/test_chunking.py
--- /dev/null
+++ b/yt/data_objects/tests/test_chunking.py
@@ -0,0 +1,36 @@
+from yt.testing import *
+
+def _get_dobjs(c):
+    dobjs = [("sphere", ("center", (1.0, "unitary"))),
+             ("sphere", ("center", (0.1, "unitary"))),
+             ("ortho_ray", (0, (c[x_dict[0]], c[y_dict[0]]))),
+             ("slice", (0, c[0])),
+             #("disk", ("center", [0.1, 0.3, 0.6],
+             #           (0.2, 'unitary'), (0.1, 'unitary'))),
+             ("cutting", ([0.1, 0.3, 0.6], 'center')),
+             ("all_data", ()),
+            ]
+    return dobjs
+
+def test_chunking():
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs)
+        c = (pf.domain_right_edge + pf.domain_left_edge)/2.0 
+        c += 0.5/pf.domain_dimensions
+        for dobj in _get_dobjs(c):
+            obj = getattr(pf.h, dobj[0])(*dobj[1])
+            coords = {'f':{}, 'i':{}}
+            for t in ["io", "all", "spatial"]:
+                coords['i'][t] = []
+                coords['f'][t] = []
+                for chunk in obj.chunks(None, t):
+                    coords['f'][t].append(chunk.fcoords[:,:])
+                    coords['i'][t].append(chunk.icoords[:,:])
+                coords['f'][t] = np.concatenate(coords['f'][t])
+                coords['i'][t] = np.concatenate(coords['i'][t])
+                coords['f'][t].sort()
+                coords['i'][t].sort()
+            yield assert_equal, coords['f']['io'], coords['f']['all']
+            yield assert_equal, coords['f']['io'], coords['f']['spatial']
+            yield assert_equal, coords['i']['io'], coords['i']['all']
+            yield assert_equal, coords['i']['io'], coords['i']['spatial']
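
Condensed, the invariant checked above is that every chunking strategy visits exactly the same cells, so coordinates agree across strategies once sorted; a sketch using the same helpers:

    from yt.testing import *

    pf = fake_random_pf(16)
    dd = pf.h.all_data()
    fcoords = {}
    for strategy in ["io", "all"]:
        parts = [chunk.fcoords[:, :] for chunk in dd.chunks(None, strategy)]
        fcoords[strategy] = np.concatenate(parts)
        fcoords[strategy].sort()
    # both traversals must cover exactly the same set of cells
    assert_equal(fcoords["io"], fcoords["all"])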



https://bitbucket.org/yt_analysis/yt-3.0/changeset/0e160f6608ae/
changeset:   0e160f6608ae
branch:      yt-3.0
user:        MatthewTurk
date:        2012-10-24 04:41:33
summary:     Making progress on ray selection by using the volume traversal code in the
selection routines.
affected #:  4 files

diff -r e944f53ca470cfde924a247c15e86d2eafd3465a -r 0e160f6608ae5bdfcc2ffd272b9d68926dc9b318 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -142,7 +142,17 @@
         #self.vec /= np.sqrt(np.dot(self.vec, self.vec))
         self._set_center(self.start_point)
         self.set_field_parameter('center', self.start_point)
-        self._dts, self._ts, self._masks = {}, {}, {}
+        self._dts, self._ts = None, None
+
+    def _generate_container_field(self, field):
+        if self._current_chunk is None:
+            self.hierarchy._identify_base_chunk(self)
+        if field == "dts":
+            raise NotImplementedError
+        elif field == "t":
+            raise NotImplementedError
+        else:
+            raise KeyError(field)
 
     def _get_data_from_grid(self, grid, field):
         if self.pf.geometry == "cylindrical":


diff -r e944f53ca470cfde924a247c15e86d2eafd3465a -r 0e160f6608ae5bdfcc2ffd272b9d68926dc9b318 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -32,6 +32,8 @@
 from selection_routines cimport SelectorObject
 from oct_container cimport OctreeContainer, OctAllocationContainer, Oct
 #from geometry_utils cimport point_to_hilbert
+from yt.utilities.lib.grid_traversal cimport \
+    VolumeContainer, sample_function, walk_volume
 
 cdef extern from "math.h":
     double exp(double x) nogil
@@ -79,60 +81,6 @@
                     cpos += 1
     return indices
 
-def ray_grids(dobj, np.ndarray[np.float64_t, ndim=2] left_edges,
-                    np.ndarray[np.float64_t, ndim=2] right_edges):
-    cdef int i, ax
-    cdef int i1, i2
-    cdef int ng = left_edges.shape[0]
-    cdef np.ndarray[np.int32_t, ndim=1] gridi = np.zeros(ng, dtype='int32')
-    cdef np.float64_t vs[3], t, p0[3], p1[3], v[3]
-    for i in range(3):
-        p0[i] = dobj.start_point[i]
-        p1[i] = dobj.end_point[i]
-        v[i] = dobj.vec[i]
-    # We check first to see if at any point, the ray intersects a grid face
-    for gi in range(ng):
-        for ax in range(3):
-            i1 = (ax+1) % 3
-            i2 = (ax+2) % 3
-            t = (left_edges[gi,ax] - p0[ax])/v[ax]
-            for i in range(3):
-                vs[i] = t * v[i] + p0[i]
-            if left_edges[gi,i1] <= vs[i1] and \
-               right_edges[gi,i1] >= vs[i1] and \
-               left_edges[gi,i2] <= vs[i2] and \
-               right_edges[gi,i2] >= vs[i2]:
-                gridi[gi] = 1
-                break
-            t = (right_edges[gi,ax] - p0[ax])/v[ax]
-            for i in range(3):
-                vs[i] = t * v[i] + p0[i]
-            if left_edges[gi,i1] <= vs[i1] and \
-               right_edges[gi,i1] >= vs[i1] and \
-               left_edges[gi,i2] <= vs[i2] and \
-               right_edges[gi,i2] >= vs[i2]:
-                gridi[gi] = 1
-                break
-        if gridi[gi] == 1: continue
-        # if the point is fully enclosed, we count the grid
-        if left_edges[gi,0] <= p0[0] and \
-           right_edges[gi,0] >= p0[0] and \
-           left_edges[gi,1] <= p0[1] and \
-           right_edges[gi,1] >= p0[1] and \
-           left_edges[gi,2] <= p0[2] and \
-           right_edges[gi,2] >= p0[2]:
-            gridi[gi] = 1
-            continue
-        if left_edges[gi,0] <= p1[0] and \
-           right_edges[gi,0] >= p1[0] and \
-           left_edges[gi,1] <= p1[1] and \
-           right_edges[gi,1] >= p1[1] and \
-           left_edges[gi,2] <= p1[2] and \
-           right_edges[gi,2] >= p1[2]:
-            gridi[gi] = 1
-            continue
-    return gridi.astype("bool")
-
 # Inclined Box
 
 cdef class SelectorObject:
@@ -284,6 +232,22 @@
                             count += child_mask[i,j,k]
         return count
 
+    def walk_ray(self, gobj):
+        cdef np.ndarray[np.uint8_t, ndim=3, cast=True] child_mask
+        child_mask = gobj.child_mask
+        cdef np.ndarray[np.uint8_t, ndim=3] mask 
+        cdef int ind[3][2]
+        cdef np.ndarray[np.float64_t, ndim=1] odds = gobj.dds
+        cdef np.ndarray[np.float64_t, ndim=1] left_edge = gobj.LeftEdge
+        cdef np.ndarray[np.float64_t, ndim=1] right_edge = gobj.RightEdge
+        cdef int i, j, k
+        cdef np.float64_t dds[3], pos[3]
+        for i in range(3):
+            dds[i] = odds[i]
+            ind[i][0] = 0
+            ind[i][1] = gobj.ActiveDimensions[i]
+        mask = np.zeros(gobj.ActiveDimensions, dtype='uint8')
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -724,6 +688,23 @@
 
 ortho_ray_selector = OrthoRaySelector
 
+cdef struct IntegrationAccumulator:
+    np.float64_t *t
+    np.float64_t *dt
+
+cdef void dt_sampler(
+             VolumeContainer *vc,
+             np.float64_t v_pos[3],
+             np.float64_t v_dir[3],
+             np.float64_t enter_t,
+             np.float64_t exit_t,
+             int index[3],
+             void *data) nogil:
+    cdef IntegrationAccumulator *am = <IntegrationAccumulator *> data
+    cdef int di = (index[0]*vc.dims[1]+index[1])*vc.dims[2]+index[2] 
+    am.t[di] = enter_t
+    am.dt[di] = (exit_t - enter_t)
+
 cdef class RaySelector(SelectorObject):
 
     cdef np.float64_t p1[3]
@@ -734,54 +715,106 @@
         cdef int i
         for i in range(3):
             self.vec[i] = dobj.vec[i]
-        if (0.0 <= self.vec[0]) and (0.0 <= self.vec[1]) and (0.0 <= self.vec[2]):
-            for i in range(3):
-                self.p1[i] = dobj.start_point[i]
-                self.p2[i] = dobj.end_point[i]
-        else:
-            for i in range(3):
-                self.p1[i] = dobj.end_point[i]
-                self.p2[i] = dobj.start_point[i]
+            self.p1[i] = dobj.start_point[i]
+            self.p2[i] = dobj.end_point[i]
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3]) nogil:
-        if ((self.p1[0] <= left_edge[0]) and (self.p2[0] > right_edge[0]) and 
-            (self.p1[1] <= left_edge[1]) and (self.p2[1] > right_edge[1]) and 
-            (self.p1[2] <= left_edge[2]) and (self.p2[2] > right_edge[2])):
+        cdef int i, ax
+        cdef int i1, i2
+        cdef np.float64_t vs[3], t, v[3]
+        for ax in range(3):
+            i1 = (ax+1) % 3
+            i2 = (ax+2) % 3
+            t = (left_edge[ax] - self.p1[ax])/self.vec[ax]
+            for i in range(3):
+                vs[i] = t * self.vec[i] + self.p1[i]
+            if left_edge[i1] <= vs[i1] and \
+               right_edge[i1] >= vs[i1] and \
+               left_edge[i2] <= vs[i2] and \
+               right_edge[i2] >= vs[i2]:
+                return 1
+            t = (right_edge[ax] - self.p1[ax])/self.vec[ax]
+            for i in range(3):
+                vs[i] = t * self.vec[i] + self.p1[i]
+            if left_edge[i1] <= vs[i1] and \
+               right_edge[i1] >= vs[i1] and \
+               left_edge[i2] <= vs[i2] and \
+               right_edge[i2] >= vs[i2]:
+                return 1
+        # if the point is fully enclosed, we count the grid
+        if left_edge[0] <= self.p1[0] and \
+           right_edge[0] >= self.p1[0] and \
+           left_edge[1] <= self.p1[1] and \
+           right_edge[1] >= self.p1[1] and \
+           left_edge[2] <= self.p1[2] and \
+           right_edge[2] >= self.p1[2]:
+            return 1
+        if left_edge[0] <= self.p2[0] and \
+           right_edge[0] >= self.p2[0] and \
+           left_edge[1] <= self.p2[1] and \
+           right_edge[1] >= self.p2[1] and \
+           left_edge[2] <= self.p2[2] and \
+           right_edge[2] >= self.p2[2]:
             return 1
         return 0
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def fill_mask(self, gobj):
+        cdef np.ndarray[np.float64_t, ndim=3] t, dt
+        cdef np.ndarray[np.uint8_t, ndim=3, cast=True] child_mask
+        cdef int i
+        cdef IntegrationAccumulator ia
+        cdef VolumeContainer vc
+        mask = np.zeros(gobj.ActiveDimensions, dtype='uint8')
+        t = np.zeros(gobj.ActiveDimensions, dtype="float64")
+        dt = np.zeros(gobj.ActiveDimensions, dtype="float64")
+        child_mask = gobj.child_mask
+        ia.t = <np.float64_t *> t.data
+        ia.dt = <np.float64_t *> dt.data
+        for i in range(3):
+            vc.left_edge[i] = gobj.LeftEdge[i]
+            vc.right_edge[i] = gobj.RightEdge[i]
+            vc.dds[i] = gobj.dds[i]
+            vc.idds[i] = 1.0/gobj.dds[i]
+            vc.dims[i] = dt.shape[0]
+        walk_volume(&vc, self.p1, self.vec, dt_sampler, <void*> &ia)
+        for i in range(dt.shape[0]):
+            for j in range(dt.shape[1]):
+                for k in range(dt.shape[2]):
+                    if dt[i,j,k] > 0.0 and child_mask[i,j,k] == 1:
+                        mask[i,j,k] = 1
+        return mask.astype("bool")
     
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3],
-                         int eterm[3]) nogil:
-        if ((self.p1[0] <= pos[0] - 0.5*dds[0]) and 
-            (self.p2[0] >  pos[0] + 0.5*dds[0]) and 
-            (self.p1[1] <= pos[1] - 0.5*dds[1]) and 
-            (self.p2[1] >  pos[1] + 0.5*dds[1]) and 
-            (self.p1[2] <= pos[2] - 0.5*dds[2]) and 
-            (self.p2[2] >  pos[2] + 0.5*dds[2])):
-            return 1
-        return 0
-
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef void set_bounds(self,
-                         np.float64_t left_edge[3], np.float64_t right_edge[3],
-                         np.float64_t dds[3], int ind[3][2], int *check):
+    def get_dt(self, gobj):
+        cdef np.ndarray[np.float64_t, ndim=3] t, dt
+        cdef np.ndarray[np.uint8_t, ndim=3, cast=True] child_mask
         cdef int i
+        cdef IntegrationAccumulator ia
+        cdef VolumeContainer vc
+        mask = np.zeros(gobj.ActiveDimensions, dtype='uint8')
+        t = np.zeros(gobj.ActiveDimensions, dtype="float64")
+        dt = np.zeros(gobj.ActiveDimensions, dtype="float64")
+        child_mask = gobj.child_mask
+        ia.t = <np.float64_t *> t.data
+        ia.dt = <np.float64_t *> dt.data
         for i in range(3):
-            ind[i][0] = <int> ((self.p1[i] - left_edge[i])/dds[i])
-            ind[i][1] = ind[i][0] + 1
-        check[0] = 0
-
+            vc.left_edge[i] = gobj.LeftEdge[i]
+            vc.right_edge[i] = gobj.RightEdge[i]
+            vc.dds[i] = gobj.dds[i]
+            vc.idds[i] = 1.0/gobj.dds[i]
+            vc.dims[i] = dt.shape[0]
+        walk_volume(&vc, self.p1, self.vec, dt_sampler, <void*> &ia)
+        return dt, t
+    
 ray_selector = RaySelector
 
 cdef class DataCollectionSelector(SelectorObject):


diff -r e944f53ca470cfde924a247c15e86d2eafd3465a -r 0e160f6608ae5bdfcc2ffd272b9d68926dc9b318 yt/utilities/lib/grid_traversal.pxd
--- /dev/null
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -0,0 +1,54 @@
+"""
+Definitions for the traversal code
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+cdef struct VolumeContainer:
+    int n_fields
+    np.float64_t **data
+    np.float64_t left_edge[3]
+    np.float64_t right_edge[3]
+    np.float64_t dds[3]
+    np.float64_t idds[3]
+    int dims[3]
+
+ctypedef void sample_function(
+                VolumeContainer *vc,
+                np.float64_t v_pos[3],
+                np.float64_t v_dir[3],
+                np.float64_t enter_t,
+                np.float64_t exit_t,
+                int index[3],
+                void *data) nogil
+
+cdef int walk_volume(VolumeContainer *vc,
+                     np.float64_t v_pos[3],
+                     np.float64_t v_dir[3],
+                     sample_function *sampler,
+                     void *data,
+                     np.float64_t *return_t = *,
+                     np.float64_t enter_t = *) nogil


diff -r e944f53ca470cfde924a247c15e86d2eafd3465a -r 0e160f6608ae5bdfcc2ffd272b9d68926dc9b318 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -56,24 +56,6 @@
     double sin(double x) nogil
     double sqrt(double x) nogil
 
-cdef struct VolumeContainer:
-    int n_fields
-    np.float64_t **data
-    np.float64_t left_edge[3]
-    np.float64_t right_edge[3]
-    np.float64_t dds[3]
-    np.float64_t idds[3]
-    int dims[3]
-
-ctypedef void sample_function(
-                VolumeContainer *vc,
-                np.float64_t v_pos[3],
-                np.float64_t v_dir[3],
-                np.float64_t enter_t,
-                np.float64_t exit_t,
-                int index[3],
-                void *data) nogil
-
 cdef class PartitionedGrid:
     cdef public object my_data
     cdef public object LeftEdge
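
dt_sampler records, per cell, the parametric entry point enter_t and the width exit_t - enter_t. A toy 1-D analogue (not the traversal walk_volume actually implements) shows why the per-cell dt values of a ray contained in the domain sum to one:

    import numpy as np

    def walk_1d(left_edge, right_edge, n_cells, p0, p1):
        # ray is x(t) = p0 + t*(p1 - p0), t in [0, 1]; assumes p0 != p1
        edges = np.linspace(left_edge, right_edge, n_cells + 1)
        t_edges = (edges - p0) / (p1 - p0)
        t_in = np.clip(np.minimum(t_edges[:-1], t_edges[1:]), 0.0, 1.0)
        t_out = np.clip(np.maximum(t_edges[:-1], t_edges[1:]), 0.0, 1.0)
        dt = t_out - t_in
        hit = dt > 0  # cells the ray actually crosses
        return t_in[hit], dt[hit]

    t, dt = walk_1d(0.0, 1.0, 8, 0.1, 0.9)
    assert abs(dt.sum() - 1.0) < 1e-12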



https://bitbucket.org/yt_analysis/yt-3.0/changeset/e890bb2f5bb5/
changeset:   e890bb2f5bb5
branch:      yt-3.0
user:        MatthewTurk
date:        2012-10-24 05:11:56
summary:     Adding tcoords to grids; this will be a tricky business to reimplement for Octs
affected #:  4 files

diff -r 0e160f6608ae5bdfcc2ffd272b9d68926dc9b318 -r e890bb2f5bb595128152b6b0e44246256a19c38c yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -542,6 +542,12 @@
         coords[:] = self.Level
         return coords
 
+    def tcoords(self, dobj):
+        mask = self.select(dobj.selector)
+        if mask is None: return np.empty(0, dtype='int64')
+        dt, t = dobj.selector.get_dt(self)
+        return dt[mask], t[mask]
+
     def select(self, selector):
         if id(selector) == self._last_selector_id:
             return self._last_mask


diff -r 0e160f6608ae5bdfcc2ffd272b9d68926dc9b318 -r e890bb2f5bb595128152b6b0e44246256a19c38c yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -102,7 +102,7 @@
 class YTRayBase(YTSelectionContainer1D):
     _type_name = "ray"
     _con_args = ('start_point', 'end_point')
-    sort_by = 't'
+    _container_fields = ("t", "dts")
     def __init__(self, start_point, end_point, pf=None, field_parameters=None):
         """ 
         This is an arbitrarily-aligned ray cast through the entire domain, at a
@@ -148,9 +148,9 @@
         if self._current_chunk is None:
             self.hierarchy._identify_base_chunk(self)
         if field == "dts":
-            raise NotImplementedError
+            return self._current_chunk.dtcoords
         elif field == "t":
-            raise NotImplementedError
+            return self._current_chunk.tcoords
         else:
             raise KeyError(field)
 


diff -r 0e160f6608ae5bdfcc2ffd272b9d68926dc9b318 -r e890bb2f5bb595128152b6b0e44246256a19c38c yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -447,3 +447,27 @@
             ind += c.size
         return ci
 
+    _tcoords = None
+    @property
+    def tcoords(self):
+        if self._tcoords is None:
+            self.dtcoords
+        return self._tcoords
+
+    _dtcoords = None
+    @property
+    def dtcoords(self):
+        if self._dtcoords is not None: return self._dtcoords
+        ct = np.empty(self.data_size, dtype='float64')
+        cdt = np.empty(self.data_size, dtype='float64')
+        self._tcoords = ct
+        self._dtcoords = cdt
+        if self.data_size == 0: return self._dtcoords
+        ind = 0
+        for obj in self.objs:
+            gdt, gt = obj.tcoords(self.dobj)
+            if gt.size == 0: continue
+            ct[ind:ind+gt.size] = gt
+            cdt[ind:ind+gdt.size] = gdt
+            ind += gt.size
+        return cdt


diff -r 0e160f6608ae5bdfcc2ffd272b9d68926dc9b318 -r e890bb2f5bb595128152b6b0e44246256a19c38c yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -232,22 +232,6 @@
                             count += child_mask[i,j,k]
         return count
 
-    def walk_ray(self, gobj):
-        cdef np.ndarray[np.uint8_t, ndim=3, cast=True] child_mask
-        child_mask = gobj.child_mask
-        cdef np.ndarray[np.uint8_t, ndim=3] mask 
-        cdef int ind[3][2]
-        cdef np.ndarray[np.float64_t, ndim=1] odds = gobj.dds
-        cdef np.ndarray[np.float64_t, ndim=1] left_edge = gobj.LeftEdge
-        cdef np.ndarray[np.float64_t, ndim=1] right_edge = gobj.RightEdge
-        cdef int i, j, k
-        cdef np.float64_t dds[3], pos[3]
-        for i in range(3):
-            dds[i] = odds[i]
-            ind[i][0] = 0
-            ind[i][1] = gobj.ActiveDimensions[i]
-        mask = np.zeros(gobj.ActiveDimensions, dtype='uint8')
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/283b8f4bced6/
changeset:   283b8f4bced6
branch:      yt-3.0
user:        MatthewTurk
date:        2012-10-24 20:35:30
summary:     Count hits directly in the ray code and add several tests to the existing
ray tests that can catch this and related problematic behavior.
affected #:  3 files

diff -r e890bb2f5bb595128152b6b0e44246256a19c38c -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -543,10 +543,8 @@
         return coords
 
     def tcoords(self, dobj):
-        mask = self.select(dobj.selector)
-        if mask is None: return np.empty(0, dtype='int64')
         dt, t = dobj.selector.get_dt(self)
-        return dt[mask], t[mask]
+        return dt, t
 
     def select(self, selector):
         if id(selector) == self._last_selector_id:


diff -r e890bb2f5bb595128152b6b0e44246256a19c38c -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 yt/data_objects/tests/test_rays.py
--- a/yt/data_objects/tests/test_rays.py
+++ b/yt/data_objects/tests/test_rays.py
@@ -5,29 +5,39 @@
         pf = fake_random_pf(64, nprocs=nproc)
         dx = (pf.domain_right_edge - pf.domain_left_edge) / \
           pf.domain_dimensions
+        # Three we choose, to get varying vectors, and ten random
+        pp1 = np.random.random((3, 13))
+        pp2 = np.random.random((3, 13))
+        pp1[:,0] = [0.1, 0.2, 0.3]
+        pp2[:,0] = [0.8, 0.1, 0.4]
+        pp1[:,1] = [0.9, 0.2, 0.3]
+        pp2[:,1] = [0.8, 0.1, 0.4]
+        pp1[:,2] = [0.9, 0.2, 0.9]
+        pp2[:,2] = [0.8, 0.1, 0.4]
+        for i in range(pp1.shape[1]):
+            p1 = pp1[:,i] + 1e-8 * np.random.random(3)
+            p2 = pp2[:,i] + 1e-8 * np.random.random(3)
 
-        p1 = np.random.random(3)
-        p2 = np.random.random(3)
+            my_ray = pf.h.ray(p1, p2)
+            yield assert_rel_equal, my_ray['dts'].sum(), 1.0, 14
+            ray_cells = my_ray['dts'] > 0
 
-        my_ray = pf.h.ray(p1, p2)
-        assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
-        ray_cells = my_ray['dts'] > 0
+            # find cells intersected by the ray
+            my_all = pf.h.all_data()
+            
+            dt = np.abs(dx / (p2 - p1))
+            tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
+                                   [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
+                                   [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
+            tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
+                                   [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
+                                   [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
+            tin = tin.max(axis=0)
+            tout = tout.min(axis=0)
+            my_cells = (tin < tout) & (tin < 1) & (tout > 0)
+            dts = np.clip(tout[my_cells], 0.0, 1.0) - np.clip(tin[my_cells], 0.0, 1.0)
 
-        # find cells intersected by the ray
-        my_all = pf.h.all_data()
-        
-        dt = np.abs(dx / (p2 - p1))
-        tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
-                               [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
-                               [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
-        tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
-                               [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
-                               [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
-        tin = tin.max(axis=0)
-        tout = tout.min(axis=0)
-        my_cells = (tin < tout) & (tin < 1) & (tout > 0)
-
-        yield assert_rel_equal, ray_cells.sum(), my_cells.sum(), 14
-        yield assert_rel_equal, my_ray['Density'][ray_cells].sum(), \
-                                my_all['Density'][my_cells].sum(), 14
-        yield assert_rel_equal, my_ray['dts'].sum(), 1.0, 14
+            yield assert_equal, ray_cells.sum(), my_cells.sum()
+            yield assert_rel_equal, my_ray['Density'][ray_cells].sum(), \
+                                    my_all['Density'][my_cells].sum(), 14
+            yield assert_rel_equal, my_ray['dts'].sum(), 1.0, 14


diff -r e890bb2f5bb595128152b6b0e44246256a19c38c -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -675,6 +675,8 @@
 cdef struct IntegrationAccumulator:
     np.float64_t *t
     np.float64_t *dt
+    np.uint8_t *child_mask
+    int hits
 
 cdef void dt_sampler(
              VolumeContainer *vc,
@@ -686,6 +688,9 @@
              void *data) nogil:
     cdef IntegrationAccumulator *am = <IntegrationAccumulator *> data
     cdef int di = (index[0]*vc.dims[1]+index[1])*vc.dims[2]+index[2] 
+    if am.child_mask[di] == 0 or enter_t == exit_t:
+        return
+    am.hits += 1
     am.t[di] = enter_t
     am.dt[di] = (exit_t - enter_t)
 
@@ -719,7 +724,8 @@
             if left_edge[i1] <= vs[i1] and \
                right_edge[i1] >= vs[i1] and \
                left_edge[i2] <= vs[i2] and \
-               right_edge[i2] >= vs[i2]:
+               right_edge[i2] >= vs[i2] and \
+               0.0 <= t <= 1.0:
                 return 1
             t = (right_edge[ax] - self.p1[ax])/self.vec[ax]
             for i in range(3):
@@ -727,7 +733,8 @@
             if left_edge[i1] <= vs[i1] and \
                right_edge[i1] >= vs[i1] and \
                left_edge[i2] <= vs[i2] and \
-               right_edge[i2] >= vs[i2]:
+               right_edge[i2] >= vs[i2] and \
+               0.0 <= t <= 1.0:
                 return 1
         # if the point is fully enclosed, we count the grid
         if left_edge[0] <= self.p1[0] and \
@@ -757,47 +764,69 @@
         cdef VolumeContainer vc
         mask = np.zeros(gobj.ActiveDimensions, dtype='uint8')
         t = np.zeros(gobj.ActiveDimensions, dtype="float64")
-        dt = np.zeros(gobj.ActiveDimensions, dtype="float64")
+        dt = np.zeros(gobj.ActiveDimensions, dtype="float64") - 1
         child_mask = gobj.child_mask
         ia.t = <np.float64_t *> t.data
         ia.dt = <np.float64_t *> dt.data
+        ia.child_mask = <np.uint8_t *> child_mask.data
+        ia.hits = 0
         for i in range(3):
             vc.left_edge[i] = gobj.LeftEdge[i]
             vc.right_edge[i] = gobj.RightEdge[i]
             vc.dds[i] = gobj.dds[i]
             vc.idds[i] = 1.0/gobj.dds[i]
-            vc.dims[i] = dt.shape[0]
+            vc.dims[i] = dt.shape[i]
         walk_volume(&vc, self.p1, self.vec, dt_sampler, <void*> &ia)
         for i in range(dt.shape[0]):
             for j in range(dt.shape[1]):
                 for k in range(dt.shape[2]):
-                    if dt[i,j,k] > 0.0 and child_mask[i,j,k] == 1:
+                    if dt[i,j,k] >= 0:
                         mask[i,j,k] = 1
         return mask.astype("bool")
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def count_cells(self, gobj):
+        return self.fill_mask(gobj).sum()
     
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
     def get_dt(self, gobj):
         cdef np.ndarray[np.float64_t, ndim=3] t, dt
+        cdef np.ndarray[np.float64_t, ndim=1] tr, dtr
         cdef np.ndarray[np.uint8_t, ndim=3, cast=True] child_mask
-        cdef int i
+        cdef int i, j, k, ni
         cdef IntegrationAccumulator ia
         cdef VolumeContainer vc
-        mask = np.zeros(gobj.ActiveDimensions, dtype='uint8')
         t = np.zeros(gobj.ActiveDimensions, dtype="float64")
-        dt = np.zeros(gobj.ActiveDimensions, dtype="float64")
+        dt = np.zeros(gobj.ActiveDimensions, dtype="float64") - 1
         child_mask = gobj.child_mask
         ia.t = <np.float64_t *> t.data
         ia.dt = <np.float64_t *> dt.data
+        ia.child_mask = <np.uint8_t *> child_mask.data
+        ia.hits = 0
         for i in range(3):
             vc.left_edge[i] = gobj.LeftEdge[i]
             vc.right_edge[i] = gobj.RightEdge[i]
             vc.dds[i] = gobj.dds[i]
             vc.idds[i] = 1.0/gobj.dds[i]
-            vc.dims[i] = dt.shape[0]
+            vc.dims[i] = dt.shape[i]
         walk_volume(&vc, self.p1, self.vec, dt_sampler, <void*> &ia)
-        return dt, t
+        tr = np.zeros(ia.hits, dtype="float64")
+        dtr = np.zeros(ia.hits, dtype="float64")
+        ni = 0
+        for i in range(dt.shape[0]):
+            for j in range(dt.shape[1]):
+                for k in range(dt.shape[2]):
+                    if dt[i,j,k] >= 0:
+                        tr[ni] = t[i,j,k]
+                        dtr[ni] = dt[i,j,k]
+                        ni += 1
+        if ni != ia.hits:
+            print ni, ia.hits
+        return dtr, tr
     
 ray_selector = RaySelector
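
The rework above makes dt == -1 the "ray missed this cell" sentinel: dt_sampler
now skips cells hidden by child_mask (or crossed with zero path length), tallies
each accepted cell in hits, and get_dt compacts the 3-D t/dt grids into 1-D
arrays of exactly that length.  A minimal NumPy sketch of the compaction step
(the function name is illustrative, not part of yt):

    import numpy as np

    def compact_ray_samples(t, dt):
        # Cells the ray never entered keep the dt == -1 sentinel; boolean
        # indexing drops them and returns the hits in the same C order as
        # the triple loop in get_dt above.
        hit = dt >= 0
        return dt[hit], t[hit]

    t = np.zeros((2, 2, 2))
    dt = np.zeros((2, 2, 2)) - 1
    t[0, 0, 0], dt[0, 0, 0] = 0.1, 0.2
    t[1, 1, 1], dt[1, 1, 1] = 0.5, 0.3
    dtr, tr = compact_ray_samples(t, dt)
    assert list(dtr) == [0.2, 0.3] and list(tr) == [0.1, 0.5]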
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/814a93ee3207/
changeset:   814a93ee3207
branch:      yt
user:        scopatz
date:        2012-10-23 21:33:27
summary:     added FLASH ray trace, timestamp, and material boundary plot callbacks, plus profile and projection tests.
affected #:  4 files

diff -r 76ab79a5ae9af5a5b062b49907bf85fc95e6bf0c -r 814a93ee320766d02a677b07a441cbc3dcc660a5 yt/data_objects/tests/test_profiles.py
--- /dev/null
+++ b/yt/data_objects/tests/test_profiles.py
@@ -0,0 +1,74 @@
+from yt.testing import *
+from yt.data_objects.profiles import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+
+_fields = ("Density", "Temperature", "Dinosaurs", "Tribbles")
+
+def test_profiles():
+    pf = fake_random_pf(64, nprocs = 8, fields = _fields)
+    nv = pf.domain_dimensions.prod()
+    dd = pf.h.all_data()
+    (rmi, rma), (tmi, tma), (dmi, dma) = dd.quantities["Extrema"](
+        ["Density", "Temperature", "Dinosaurs"])
+    rt, tt, dt = dd.quantities["TotalQuantity"](
+        ["Density", "Temperature", "Dinosaurs"])
+    # First we look at the profiles over a range of bin counts.
+    for nb in [8, 16, 32, 64]:
+        for lr in [True, False]:
+            # We log all the fields or don't log 'em all.  No need to do them
+            # individually.
+            for lf in [True, False]: 
+                # We have the min and the max, but to avoid cutting them off
+                # since we aren't doing end-collect, we cut a bit off the edges
+                for ec, e1, e2 in [(False, 0.9, 1.1), (True, 1.0, 1.0)]:
+                    p1d = BinnedProfile1D(dd, 
+                        nb, "Density", rmi*e1, rma*e2, lf,
+                        lr, end_collect=ec)
+                    p1d.add_fields(["Ones", "Temperature"], weight=None)
+                    yield assert_equal, p1d["Ones"].sum(), nv
+                    yield assert_rel_equal, tt, p1d["Temperature"].sum(), 7
+
+                    p2d = BinnedProfile2D(dd, 
+                        nb, "Density", rmi*e1, rma*e2, lf,
+                        nb, "Temperature", tmi*e1, tma*e2, lf,
+                        lr, end_collect=ec)
+                    p2d.add_fields(["Ones", "Temperature"], weight=None)
+                    yield assert_equal, p2d["Ones"].sum(), nv
+                    yield assert_rel_equal, tt, p2d["Temperature"].sum(), 7
+
+                    p3d = BinnedProfile3D(dd, 
+                        nb, "Density", rmi*e1, rma*e2, lf,
+                        nb, "Temperature", tmi*e1, tma*e2, lf,
+                        nb, "Dinosaurs", dmi*e1, dma*e2, lf,
+                        lr, end_collect=ec)
+                    p3d.add_fields(["Ones", "Temperature"], weight=None)
+                    yield assert_equal, p3d["Ones"].sum(), nv
+                    yield assert_rel_equal, tt, p3d["Temperature"].sum(), 7
+
+            p1d = BinnedProfile1D(dd, nb, "x", 0.0, 1.0, log_space=False)
+            p1d.add_fields("Ones", weight=None)
+            av = nv / nb
+            yield assert_equal, p1d["Ones"][:-1], np.ones(nb)*av
+            # We re-bin ones with a weight now
+            p1d.add_fields(["Ones"], weight="Temperature")
+            yield assert_equal, p1d["Ones"][:-1], np.ones(nb)
+
+            p2d = BinnedProfile2D(dd, nb, "x", 0.0, 1.0, False,
+                                      nb, "y", 0.0, 1.0, False)
+            p2d.add_fields("Ones", weight=None)
+            av = nv / nb**2
+            yield assert_equal, p2d["Ones"][:-1,:-1], np.ones((nb, nb))*av
+            # We re-bin ones with a weight now
+            p2d.add_fields(["Ones"], weight="Temperature")
+            yield assert_equal, p2d["Ones"][:-1,:-1], np.ones((nb, nb))
+
+            p3d = BinnedProfile3D(dd, nb, "x", 0.0, 1.0, False,
+                                      nb, "y", 0.0, 1.0, False,
+                                      nb, "z", 0.0, 1.0, False)
+            p3d.add_fields("Ones", weight=None)
+            av = nv / nb**3
+            yield assert_equal, p3d["Ones"][:-1,:-1,:-1], np.ones((nb, nb, nb))*av
+            # We re-bin ones with a weight now
+            p3d.add_fields(["Ones"], weight="Temperature")
+            yield assert_equal, p3d["Ones"][:-1,:-1,:-1], np.ones((nb,nb,nb))
+
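
For reference, a minimal sketch of the BinnedProfile1D pattern these tests
exercise, under the same fake-data setup; the two bare booleans mirror the
positional lf/lr flags above:

    from yt.testing import fake_random_pf
    from yt.data_objects.profiles import BinnedProfile1D

    pf = fake_random_pf(16, fields = ("Density", "Temperature"))
    dd = pf.h.all_data()
    (rmi, rma), = dd.quantities["Extrema"]("Density")
    # Bin by Density, padding the bounds as the test does for ec=False.
    prof = BinnedProfile1D(dd, 16, "Density", rmi*0.9, rma*1.1, True, True,
                           end_collect=False)
    prof.add_fields(["Ones", "Temperature"], weight=None)
    # With weight=None, the "Ones" bins sum to the total cell count.
    assert prof["Ones"].sum() == pf.domain_dimensions.prod()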


diff -r 76ab79a5ae9af5a5b062b49907bf85fc95e6bf0c -r 814a93ee320766d02a677b07a441cbc3dcc660a5 yt/data_objects/tests/test_projection.py
--- /dev/null
+++ b/yt/data_objects/tests/test_projection.py
@@ -0,0 +1,35 @@
+from yt.testing import *
+from yt.data_objects.profiles import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+
+def test_projection():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = 1)
+        dims = pf.domain_dimensions
+        xn, yn, zn = pf.domain_dimensions
+        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
+        dd = pf.h.all_data()
+        rho_tot = dd.quantities["TotalQuantity"]("Density")[0]
+        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        uc = [np.unique(c) for c in coords]
+        # Some simple projection tests with single grids
+        for ax, an in enumerate("xyz"):
+            xax = x_dict[ax]
+            yax = y_dict[ax]
+            for wf in ["Density", None]:
+                proj = pf.h.proj(ax, ["Ones", "Density"], weight_field = wf)
+                yield assert_equal, proj["Ones"].sum(), proj["Ones"].size
+                yield assert_equal, proj["Ones"].min(), 1.0
+                yield assert_equal, proj["Ones"].max(), 1.0
+                yield assert_equal, np.unique(proj["px"]), uc[xax]
+                yield assert_equal, np.unique(proj["py"]), uc[yax]
+                yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
+                yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+            # wf == None
+            yield assert_equal, wf, None
+            v1 = proj["Density"].sum()
+            v2 = (dd["Density"] * dd["d%s" % an]).sum()
+            yield assert_rel_equal, v1, v2, 10


diff -r 76ab79a5ae9af5a5b062b49907bf85fc95e6bf0c -r 814a93ee320766d02a677b07a441cbc3dcc660a5 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -24,7 +24,10 @@
 
 import numpy as np
 from yt.funcs import *
-from numpy.testing import assert_array_equal
+from numpy.testing import assert_array_equal, assert_equal, assert_almost_equal
+
+def assert_rel_equal(a1, a2, decimals):
+    return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 
@@ -127,7 +130,8 @@
 
     return left, right, level
 
-def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",), negative = False):
+def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",),
+                   negative = False, nprocs = 1):
     from yt.frontends.stream.api import load_uniform_grid
     if not iterable(ndims):
         ndims = [ndims, ndims, ndims]
@@ -139,5 +143,5 @@
         offset = 0.0
     data = dict((field, (np.random.random(ndims) - offset) * peak_value)
                  for field in fields)
-    ug = load_uniform_grid(data, ndims, 1.0)
+    ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
     return ug
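
The new assert_rel_equal helper turns an absolute comparison into a relative
one: the ratio a1/a2 must agree with 1.0 to the requested number of decimal
places, so the tolerance scales with the magnitude of the operands.  A quick
sketch:

    from numpy.testing import assert_almost_equal

    def assert_rel_equal(a1, a2, decimals):
        # a1/a2 must match 1.0 to `decimals` places, so the *relative*
        # error is bounded regardless of the magnitude of a1 and a2.
        return assert_almost_equal(a1/a2, 1.0, decimals)

    assert_rel_equal(1.0 + 1e-9, 1.0, 7)    # passes: relative error 1e-9
    assert_rel_equal(1e20 + 1e11, 1e20, 7)  # passes: same relative error
    # assert_rel_equal(1.001, 1.0, 7)       # would raise AssertionError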


diff -r 76ab79a5ae9af5a5b062b49907bf85fc95e6bf0c -r 814a93ee320766d02a677b07a441cbc3dcc660a5 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -7,6 +7,8 @@
 Affiliation: UC Berkeley
 Author: Stephen Skory <s at skory.us>
 Affiliation: UC San Diego
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: The University of Chicago
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008-2011 Matthew Turk, JS Oishi, Stephen Skory.  All Rights Reserved.
@@ -1124,3 +1126,152 @@
     def __call__(self,plot):
         plot._axes.set_title(self.title)
 
+class FlashRayDataCallback(PlotCallback):
+    _type_name = "flash_ray_data"
+    def __init__(self, cmap_name='bone', sample=None):
+        """ 
+        annotate_flash_ray_data(cmap_name='bone', sample=None)
+
+        Adds ray trace data to the plot.  *cmap_name* is the name of the color
+        map ('bone', 'jet', 'hot', etc).  *sample* dictates the amount of
+        downsampling to do to prevent all of the rays from being plotted.
+        This may be None (plot all rays, default), an integer (step size), or
+        a slice object.
+        """
+        self.cmap_name = cmap_name
+        self.sample = sample if isinstance(sample, slice) else slice(None, None, sample)
+
+    def __call__(self, plot):
+        ray_data = plot.data.pf._handle["RayData"][:]
+        idx = ray_data[:,0].argsort(kind="mergesort")
+        ray_data = ray_data[idx]
+
+        tags = ray_data[:,0]
+        coords = ray_data[:,1:3]
+        power = ray_data[:,4]
+        power /= power.max()
+        cx, cy = self.convert_to_plot(plot, coords.T)
+        coords[:,0], coords[:,1] = cx, cy
+        splitidx = np.argwhere(0 < (tags[1:] - tags[:-1])) + 1
+        coords = np.split(coords, splitidx.flat)[self.sample]
+        power = np.split(power, splitidx.flat)[self.sample]
+        cmap = matplotlib.cm.get_cmap(self.cmap_name)
+
+        plot._axes.hold(True)
+        colors = [cmap(p.max()) for p in power]
+        lc = matplotlib.collections.LineCollection(coords, colors=colors)
+        plot._axes.add_collection(lc)
+        plot._axes.hold(False)
+
+
+class TimestampCallback(PlotCallback):
+    _type_name = "timestamp"
+    _time_conv = {
+          'as': 1e-18,
+          'attosec': 1e-18,
+          'attosecond': 1e-18,
+          'attoseconds': 1e-18,
+          'fs': 1e-15,
+          'femtosec': 1e-15,
+          'femtosecond': 1e-15,
+          'femtoseconds': 1e-15,
+          'ps': 1e-12,
+          'picosec': 1e-12,
+          'picosecond': 1e-12,
+          'picoseconds': 1e-12,
+          'ns': 1e-9,
+          'nanosec': 1e-9,
+          'nanosecond':1e-9,
+          'nanoseconds' : 1e-9,
+          'us': 1e-6,
+          'microsec': 1e-6,
+          'microsecond': 1e-6,
+          'microseconds': 1e-6,
+          'ms': 1e-3,
+          'millisec': 1e-3,
+          'millisecond': 1e-3,
+          'milliseconds': 1e-3,
+          's': 1.0,
+          'sec': 1.0,
+          'second':1.0,
+          'seconds': 1.0,
+          'm': 60.0,
+          'min': 60.0,
+          'minute': 60.0,
+          'minutes': 60.0,
+          'h': 3600.0,
+          'hour': 3600.0,
+          'hours': 3600.0,
+          'd': 86400.0,
+          'day': 86400.0,
+          'days': 86400.0,
+          'y': 86400.0*365.25,
+          'year': 86400.0*365.25,
+          'years': 86400.0*365.25,
+          'ev': 1e-9 * 7.6e-8 / 6.03,
+          'kev': 1e-12 * 7.6e-8 / 6.03,
+          'mev': 1e-15 * 7.6e-8 / 6.03,
+          }
+
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", **kwargs):
+        """ 
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
+
+        Adds the current time to the plot at the point given by *x* and *y*.
+        If *units* is given ('s', 'ms', 'ns', etc), it will convert the time
+        to those units.  If *units* is None, it will attempt to pick the
+        appropriate scale automatically.  The *format* keyword is a template
+        string that will be evaluated and displayed on the plot.  All other
+        *kwargs* will be passed to the text() method on the plot axes.  See
+        matplotlib's text() function for more information.
+        """
+        self.x = x
+        self.y = y
+        self.format = format
+        self.units = units
+        self.kwargs = {'color': 'w'}
+        self.kwargs.update(kwargs)
+
+    def __call__(self, plot):
+        if self.units is None:
+            t = plot.data.pf.current_time
+            scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']
+            self.units = 's'
+            for k in scale_keys:
+                if t < self._time_conv[k]:
+                    break
+                self.units = k
+        t = plot.data.pf.current_time / self._time_conv[self.units.lower()]
+        if self.units == 'us':
+            self.units = '$\\mu s$'
+        s = self.format.format(time=t, units=self.units)
+        plot._axes.hold(True)
+        plot._axes.text(self.x, self.y, s, **self.kwargs)
+        plot._axes.hold(False)
+
+
+class MaterialBoundaryCallback(ContourCallback):
+    _type_name = "material_boundary"
+    def __init__(self, field='targ', ncont=1, factor=4, clim=(0.9, 1.0), **kwargs):
+        """ 
+        annotate_material_boundary(field='targ', ncont=1, factor=4,
+                                   clim=(0.9, 1.0), **kwargs)
+
+        Add the limiting contours of *field* to the plot.  Nominally, *field*
+        is the target material but may be any other field present in the
+        hierarchy.  The number of contours generated is given by *ncont*,
+        *factor* governs the number of points used in the interpolation, and
+        *clim* gives the (lower, upper) limits for contouring.  For this to
+        truly be the boundary, *clim* should be close to the edge.  For
+        example, the default is (0.9, 1.0) for 'targ', which is defined on
+        the range [0.0, 1.0].  All other *kwargs* will be passed to the
+        contour() method on the plot axes.  See matplotlib for more
+        information.
+        """
+        plot_args = {'colors': 'w'}
+        plot_args.update(kwargs)
+        super(MaterialBoundaryCallback, self).__init__(field=field, ncont=ncont,
+                                                       factor=factor, clim=clim,
+                                                       plot_args=plot_args)
+
+    def __call__(self, plot):
+        super(MaterialBoundaryCallback, self).__call__(plot)
+
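
The unit auto-detection in TimestampCallback.__call__ walks the scale keys from
smallest to largest and keeps the last unit whose conversion factor does not
exceed the current time.  A standalone sketch of that selection logic
(conversion table truncated to the SI keys):

    _time_conv = {'as': 1e-18, 'fs': 1e-15, 'ps': 1e-12,
                  'ns': 1e-9,  'us': 1e-6,  'ms': 1e-3, 's': 1.0}

    def pick_time_units(t):
        # Keep the last unit whose factor is still <= t, as in __call__.
        units = 's'
        for k in ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']:
            if t < _time_conv[k]:
                break
            units = k
        return units, t / _time_conv[units]

    units, scaled = pick_time_units(5.0e-7)
    assert units == 'ns' and abs(scaled - 500.0) < 1e-9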



https://bitbucket.org/yt_analysis/yt-3.0/changeset/df3c48e34cc9/
changeset:   df3c48e34cc9
branch:      yt
user:        scopatz
date:        2012-10-23 21:35:34
summary:     merge commit
affected #:  36 files

diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
-include distribute_setup.py
+include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.hh *.h README* 
+recursive-include yt *.pyx *.pxd *.hh *.h README*


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 nose.cfg
--- /dev/null
+++ b/nose.cfg
@@ -0,0 +1,4 @@
+[nosetests]
+detailed-errors=1
+where=yt
+exclude=answer_testing


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,3 +1,9 @@
 [egg_info]
 #tag_build = .dev
 #tag_svn_revision = 1
+
+[nosetests]
+detailed-errors=1
+where=yt
+exclude=answer_testing
+with-xunit=1


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -31,7 +31,7 @@
 from yt.funcs import *
 from yt.utilities.performance_counters import yt_counters, time_function
 try:
-    from yt.utilities.kdtree import \
+    from yt.utilities.kdtree.api import \
         chainHOP_tags_dens, \
         create_tree, fKD, find_nn_nearest_neighbors, \
         free_tree, find_chunk_nearest_neighbors


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -606,6 +606,7 @@
 
         if newProfile:
             mylog.info("Writing halo %d" % halo['id'])
+            if os.path.exists(filename): os.remove(filename)
             if filename.endswith('.h5'):
                 profile.write_out_h5(filename)
             else:
@@ -717,7 +718,9 @@
             Default=True.
         njobs : int
             The number of jobs over which to split the projections.  Set
-            to -1 so that each halo is done by a single processor.
+            to -1 so that each halo is done by a single processor.  Halo 
+            projections do not currently work in parallel, so this must 
+            be set to -1.
             Default: -1.
         dynamic : bool
             If True, distribute halos using a task queue.  If False,
@@ -731,6 +734,12 @@
 
         """
 
+        # Halo projections cannot run in parallel because they are done by 
+        # giving a data source to the projection object.
+        if njobs > 0:
+            mylog.warn("Halo projections cannot use more than one processor per halo, setting njobs to -1.")
+            njobs = -1
+        
         # Get list of halos for projecting.
         if halo_list == 'filtered':
             halo_projection_list = self.filtered_halos


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -30,7 +30,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import ParallelAnalysisInterface, parallel_blocking_call, parallel_root_only
 
 try:
-    from yt.utilities.kdtree import *
+    from yt.utilities.kdtree.api import *
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -38,6 +38,7 @@
     inline = 'False',
     numthreads = '-1',
     __withinreason = 'False',
+    __withintesting = 'False',
     __parallel = 'False',
     __global_parallel_rank = '0',
     __global_parallel_size = '1',


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -237,6 +237,7 @@
     def __set_default_field_parameters(self):
         self.set_field_parameter("center",np.zeros(3,dtype='float64'))
         self.set_field_parameter("bulk_velocity",np.zeros(3,dtype='float64'))
+        self.set_field_parameter("normal",np.array([0,0,1],dtype='float64'))
 
     def _set_center(self, center):
         if center is None:
@@ -3658,7 +3659,7 @@
 class AMRCoveringGridBase(AMR3DData):
     _spatial = True
     _type_name = "covering_grid"
-    _con_args = ('level', 'left_edge', 'right_edge', 'ActiveDimensions')
+    _con_args = ('level', 'left_edge', 'ActiveDimensions')
     def __init__(self, level, left_edge, dims, fields = None,
                  pf = None, num_ghost_zones = 0, use_pbar = True, **kwargs):
         """A 3D region with all data extracted to a single, specified
@@ -3685,8 +3686,9 @@
                            fields=fields, pf=pf, **kwargs)
         self.left_edge = np.array(left_edge)
         self.level = level
-        self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
-        self.ActiveDimensions = np.array(dims,dtype='int32')
+        rdx = self.pf.domain_dimensions*self.pf.refine_by**level
+        self.dds = self.pf.domain_width/rdx.astype("float64")
+        self.ActiveDimensions = np.array(dims, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones
         self._use_pbar = use_pbar
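
With this change the covering grid derives its cell width purely from domain
metadata (domain_width, domain_dimensions, refine_by), so a grid no longer has
to exist at the requested level.  The arithmetic, with illustrative values:

    import numpy as np

    domain_width = np.ones(3)
    domain_dimensions = np.array([16, 16, 16])
    refine_by, level = 2, 3
    rdx = domain_dimensions * refine_by**level   # 128 cells per axis
    dds = domain_width / rdx.astype("float64")   # cell width: 1/128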


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -598,16 +598,16 @@
                     continue
             else:
                 nz_filter = None
-            mins.append(data[field][nz_filter].min())
-            maxs.append(data[field][nz_filter].max())
+            mins.append(np.nanmin(data[field][nz_filter]))
+            maxs.append(np.nanmax(data[field][nz_filter]))
         else:
             if this_filter.any():
                 if non_zero:
                     nz_filter = ((this_filter) &
                                  (data[field][this_filter] > 0.0))
                 else: nz_filter = this_filter
-                mins.append(data[field][nz_filter].min())
-                maxs.append(data[field][nz_filter].max())
+                mins.append(np.nanmin(data[field][nz_filter]))
+                maxs.append(np.nanmax(data[field][nz_filter]))
             else:
                 mins.append(1e90)
                 maxs.append(-1e90)
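
Switching the Extrema reductions to np.nanmin/np.nanmax keeps stray NaNs (e.g.
RadialVelocity evaluated exactly at the center point) from poisoning the
result, since plain min/max propagate NaN:

    import numpy as np

    a = np.array([1.0, np.nan, 3.0])
    assert np.isnan(a.min())          # min/max propagate the NaN
    assert np.nanmin(a) == 1.0
    assert np.nanmax(a) == 3.0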


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -160,7 +160,8 @@
             # required attrs
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
-                pf.hubble_constant = pf.cosmological_simulation = 0.0
+                pf.cosmological_simulation = 0.0
+            pf.hubble_constant = 0.7
             pf.domain_left_edge = np.zeros(3, 'float64')
             pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/data_objects/tests/test_covering_grid.py
--- /dev/null
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -0,0 +1,27 @@
+from yt.testing import *
+from yt.data_objects.profiles import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_covering_grid():
+    # We decompose in different ways
+    for level in [0, 1, 2]:
+        for nprocs in [1, 2, 4, 8]:
+            pf = fake_random_pf(16, nprocs = nprocs)
+            dn = pf.refine_by**level 
+            cg = pf.h.covering_grid(level, [0.0, 0.0, 0.0],
+                    dn * pf.domain_dimensions)
+            yield assert_equal, cg["Ones"].max(), 1.0
+            yield assert_equal, cg["Ones"].min(), 1.0
+            yield assert_equal, cg["CellVolume"].sum(), pf.domain_width.prod()
+            for g in pf.h.grids:
+                di = g.get_global_startindex()
+                dd = g.ActiveDimensions
+                for i in range(dn):
+                    f = cg["Density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
+                                      dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
+                                      dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
+                    yield assert_equal, f, g["Density"]


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/data_objects/tests/test_derived_quantities.py
--- /dev/null
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -0,0 +1,24 @@
+from yt.testing import *
+import numpy as np
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_extrema():
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(16, nprocs = nprocs, fields = ("Density",
+                "x-velocity", "y-velocity", "z-velocity"))
+        sp = pf.h.sphere("c", (0.25, '1'))
+        (mi, ma), = sp.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(sp["Density"])
+        yield assert_equal, ma, np.nanmax(sp["Density"])
+        dd = pf.h.all_data()
+        (mi, ma), = dd.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(dd["Density"])
+        yield assert_equal, ma, np.nanmax(dd["Density"])
+        sp = pf.h.sphere("max", (0.25, '1'))
+        yield assert_equal, np.any(np.isnan(sp["RadialVelocity"])), True
+        (mi, ma), = dd.quantities["Extrema"]("RadialVelocity")
+        yield assert_equal, mi, np.nanmin(dd["RadialVelocity"])
+        yield assert_equal, ma, np.nanmax(dd["RadialVelocity"])


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/data_objects/tests/test_extract_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -0,0 +1,53 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_cut_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        r = dd.cut_region( [ "grid['Temperature'] > 0.5",
+                             "grid['Density'] < 0.75",
+                             "grid['x-velocity'] > 0.25" ])
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        t2 = (r["Temperature"] < 0.75)
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+
+def test_extract_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        r = dd.extract_region(t)
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        t2 = (r["Temperature"] < 0.75)
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+        t3 = (r["Temperature"] < 0.75)
+        r3 = r.extract_region( t3 )
+        yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
+        yield assert_equal, np.all(r3["Temperature"] < 0.75), True


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/data_objects/tests/test_fields.py
--- /dev/null
+++ b/yt/data_objects/tests/test_fields.py
@@ -0,0 +1,91 @@
+from yt.testing import *
+import numpy as np
+from yt.data_objects.field_info_container import \
+    FieldInfo
+import yt.data_objects.universal_fields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    np.seterr(all = 'ignore')
+
+_sample_parameters = dict(
+    axis = 0,
+    center = np.array((0.0, 0.0, 0.0)),
+    bulk_velocity = np.array((0.0, 0.0, 0.0)),
+    normal = np.array((0.0, 0.0, 1.0)),
+    cp_x_vec = np.array((1.0, 0.0, 0.0)),
+    cp_y_vec = np.array((0.0, 1.0, 0.0)),
+    cp_z_vec = np.array((0.0, 0.0, 1.0)),
+)
+
+_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
+
+def realistic_pf(fields, nprocs):
+    pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
+    pf.parameters["HydroMethod"] = "streaming"
+    pf.parameters["Gamma"] = 5.0/3.0
+    pf.parameters["EOSType"] = 1.0
+    pf.parameters["EOSSoundSpeed"] = 1.0
+    pf.conversion_factors["Time"] = 1.0
+    pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
+    pf.current_redshift = 0.0001
+    pf.hubble_constant = 0.7
+    for unit in mpc_conversion:
+        pf.units[unit+'h'] = pf.units[unit]
+        pf.units[unit+'cm'] = pf.units[unit]
+        pf.units[unit+'hcm'] = pf.units[unit]
+    return pf
+
+class TestFieldAccess(object):
+    description = None
+
+    def __init__(self, field_name, nproc):
+        # Note this should be a field name
+        self.field_name = field_name
+        self.description = "Accessing_%s_%s" % (field_name, nproc)
+        self.nproc = nproc
+
+    def __call__(self):
+        field = FieldInfo[self.field_name]
+        deps = field.get_dependencies()
+        fields = deps.requested + _base_fields
+        skip_grids = False
+        needs_spatial = False
+        for v in field.validators:
+            f = getattr(v, "fields", None)
+            if f: fields += f
+            if getattr(v, "ghost_zones", 0) > 0:
+                skip_grids = True
+            if hasattr(v, "ghost_zones"):
+                needs_spatial = True
+        pf = realistic_pf(fields, self.nproc)
+        # This gives unequal sized grids as well as subgrids
+        dd1 = pf.h.all_data()
+        dd2 = pf.h.all_data()
+        dd1.field_parameters.update(_sample_parameters)
+        dd2.field_parameters.update(_sample_parameters)
+        v1 = dd1[self.field_name]
+        conv = field._convert_function(dd1) or 1.0
+        if not needs_spatial:
+            assert_equal(v1, conv*field._function(field, dd2))
+        if not skip_grids:
+            for g in pf.h.grids:
+                g.field_parameters.update(_sample_parameters)
+                conv = field._convert_function(g) or 1.0
+                v1 = g[self.field_name]
+                g.clear_data()
+                g.field_parameters.update(_sample_parameters)
+                assert_equal(v1, conv*field._function(field, g))
+
+def test_all_fields():
+    for field in FieldInfo:
+        if field.startswith("CuttingPlane"): continue
+        if field.startswith("particle"): continue
+        if field.startswith("CIC"): continue
+        if field.startswith("WeakLensingConvergence"): continue
+        if FieldInfo[field].particle_type: continue
+        for nproc in [1, 4, 8]:
+            yield TestFieldAccess(field, nproc)


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/data_objects/tests/test_ortho_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -0,0 +1,25 @@
+from yt.testing import *
+
+def test_ortho_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
+
+    axes = ['x', 'y', 'z']
+    for ax, an in enumerate(axes):
+        ocoord = np.random.random(2)
+
+        my_oray = pf.h.ortho_ray(ax, ocoord)
+
+        my_axes = range(3)
+        del my_axes[ax]
+
+        # find the cells intersected by the ortho ray
+        my_all = pf.h.all_data()
+        my_cells = (np.abs(my_all[axes[my_axes[0]]] - ocoord[0]) <= 
+                    0.5 * dx[my_axes[0]]) & \
+                   (np.abs(my_all[axes[my_axes[1]]] - ocoord[1]) <= 
+                    0.5 * dx[my_axes[1]])
+
+        assert_equal(my_oray['Density'].sum(),
+                     my_all['Density'][my_cells].sum())


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -2,11 +2,15 @@
 from yt.data_objects.profiles import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
 
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
 def test_projection():
     for nprocs in [8, 1]:
         # We want to test both 1 proc and 8 procs, to make sure that
         # parallelism isn't broken
-        pf = fake_random_pf(64, nprocs = 1)
+        pf = fake_random_pf(64, nprocs = nprocs)
         dims = pf.domain_dimensions
         xn, yn, zn = pf.domain_dimensions
         xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/data_objects/tests/test_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_rays.py
@@ -0,0 +1,31 @@
+from yt.testing import *
+
+def test_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+      pf.domain_dimensions
+
+    p1 = np.random.random(3)
+    p2 = np.random.random(3)
+
+    my_ray = pf.h.ray(p1, p2)
+    assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
+    ray_cells = my_ray['dts'] > 0
+
+    # find cells intersected by the ray
+    my_all = pf.h.all_data()
+    
+    dt = np.abs(dx / (p2 - p1))
+    tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
+                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
+                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
+    tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
+                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
+                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
+    tin = tin.max(axis=0)
+    tout = tout.min(axis=0)
+    my_cells = (tin < tout) & (tin < 1) & (tout > 0)
+
+    assert_rel_equal(ray_cells.sum(), my_cells.sum(), 14)
+    assert_rel_equal(my_ray['Density'][ray_cells].sum(),
+                     my_all['Density'][my_cells].sum(), 14)
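
The reference computation above is the standard slab method: parametrize the
ray as p(t) = p1 + t*(p2 - p1), intersect the three per-axis t-intervals, and
accept a cell iff the latest entry precedes the earliest exit with the overlap
meeting [0, 1].  A single-cell sketch, assuming the ray has no zero components
(as the random rays here almost surely do not):

    import numpy as np

    def ray_hits_cell(p1, p2, cell_le, cell_re):
        p1 = np.asarray(p1, dtype="float64")
        vec = np.asarray(p2, dtype="float64") - p1
        ta = (np.asarray(cell_le) - p1) / vec
        tb = (np.asarray(cell_re) - p1) / vec
        tin = np.minimum(ta, tb).max()    # latest per-axis entry
        tout = np.maximum(ta, tb).min()   # earliest per-axis exit
        return tin < tout and tin < 1.0 and tout > 0.0

    assert ray_hits_cell((0., 0., 0.), (1., 1., 1.),
                         (0.4, 0.4, 0.4), (0.6, 0.6, 0.6))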


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -258,8 +258,11 @@
 
         """
         if isinstance(filenames, types.StringTypes):
+            pattern = filenames
             filenames = glob.glob(filenames)
             filenames.sort()
+            if len(filenames) == 0:
+                raise YTNoFilenamesMatchPattern(pattern)
         obj = cls(filenames[:], parallel = parallel)
         return obj
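
An empty glob now raises a descriptive exception up front instead of silently
producing an empty series.  A sketch of the resulting behavior, assuming the
TimeSeriesData class that defines from_filenames here and a hypothetical
pattern:

    from yt.data_objects.time_series import TimeSeriesData
    from yt.utilities.exceptions import YTNoFilenamesMatchPattern

    try:
        ts = TimeSeriesData.from_filenames("DD????/data????")  # hypothetical
    except YTNoFilenamesMatchPattern as e:
        print e   # No filenames were found to match the pattern: ...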
 


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -32,7 +32,7 @@
 
 from yt.funcs import *
 
-from yt.utilities.lib import CICDeposit_3, obtain_rvec
+from yt.utilities.lib import CICDeposit_3, obtain_rvec, obtain_rv_vec
 from yt.utilities.cosmology import Cosmology
 from field_info_container import \
     add_field, \
@@ -54,7 +54,19 @@
      kboltz, \
      G, \
      rho_crit_now, \
-     speed_of_light_cgs
+     speed_of_light_cgs, \
+     km_per_cm
+
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi
      
 # Note that, despite my newfound efforts to comply with PEP-8,
 # I violate it here in order to keep the name/func_name relationship
@@ -179,12 +191,8 @@
 
 def _VelocityMagnitude(field, data):
     """M{|v|}"""
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = np.zeros(3)
-    return ( (data["x-velocity"]-bulk_velocity[0])**2.0 + \
-             (data["y-velocity"]-bulk_velocity[1])**2.0 + \
-             (data["z-velocity"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
+    velocities = obtain_rv_vec(data)
+    return np.sqrt(np.sum(velocities**2,axis=0))
 add_field("VelocityMagnitude", function=_VelocityMagnitude,
           take_log=False, units=r"\rm{cm}/\rm{s}")
 
@@ -194,13 +202,6 @@
           function=_TangentialOverVelocityMagnitude,
           take_log=False)
 
-def _TangentialVelocity(field, data):
-    return np.sqrt(data["VelocityMagnitude"]**2.0
-                 - data["RadialVelocity"]**2.0)
-add_field("TangentialVelocity", 
-          function=_TangentialVelocity,
-          take_log=False, units=r"\rm{cm}/\rm{s}")
-
 def _Pressure(field, data):
     """M{(Gamma-1.0)*rho*E}"""
     return (data.pf["Gamma"] - 1.0) * \
@@ -223,14 +224,9 @@
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
       
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The spherical coordinates radius is simply the magnitude of the
-    ## coords vector.
-
-    return np.sqrt(np.sum(coords**2,axis=-1))
+    return get_sph_r(coords)
 
 def _Convert_sph_r_CGS(data):
    return data.convert("cm")
@@ -245,20 +241,9 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The angle (theta) with respect to the normal (J), is the arccos
-    ## of the dot product of the normal with the normalized coords
-    ## vector.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    JdotCoords = np.sum(J*coords,axis=-1)
-    
-    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
+    return get_sph_theta(coords, normal)
 
 add_field("sph_theta", function=_sph_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
@@ -269,54 +254,21 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
-    
-    ## We have freedom with respect to what axis (xprime) to define
-    ## the disk angle. Here I've chosen to use the axis that is
-    ## perpendicular to the normal and the y-axis. When normal ==
-    ## y-hat, then set xprime = z-hat. With this definition, when
-    ## normal == z-hat (as is typical), then xprime == x-hat.
-    ##
-    ## The angle is then given by the arctan of the ratio of the
-    ## yprime-component and the xprime-component of the coords vector.
+    coords = obtain_rvec(data)
 
-    xprime = np.cross([0.0,1.0,0.0],normal)
-    if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
-    yprime = np.cross(normal,xprime)
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
-    
-    Px = np.sum(Jx*coords,axis=-1)
-    Py = np.sum(Jy*coords,axis=-1)
-    
-    return np.arctan2(Py,Px)
+    return get_sph_phi(coords, normal)
 
 add_field("sph_phi", function=_sph_phi,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
 
-
-
 ### cylindrical coordinates: R (radius in the cylinder's plane)
 def _cyl_R(field, data):
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
       
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The cross product of the normal (J) with the coords vector
-    ## gives a vector of magnitude equal to the cylindrical radius.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    JcrossCoords = np.cross(J,coords)
-    return np.sqrt(np.sum(JcrossCoords**2,axis=-1))
+    return get_cyl_r(coords, normal)
 
 def _Convert_cyl_R_CGS(data):
    return data.convert("cm")
@@ -324,6 +276,9 @@
 add_field("cyl_R", function=_cyl_R,
          validators=[ValidateParameter("center"),ValidateParameter("normal")],
          convert_function = _Convert_cyl_R_CGS, units=r"\rm{cm}")
+add_field("cyl_RCode", function=_cyl_R,
+          validators=[ValidateParameter("center"),ValidateParameter("normal")],
+          units=r"Radius (code)")
 
 
 ### cylindrical coordinates: z (height above the cylinder's plane)
@@ -331,17 +286,9 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The dot product of the normal (J) with the coords vector gives
-    ## the cylindrical height.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    return np.sum(J*coords,axis=-1)  
+    return get_cyl_z(coords, normal)
 
 def _Convert_cyl_z_CGS(data):
    return data.convert("cm")
@@ -352,14 +299,17 @@
 
 
 ### cylindrical coordinates: theta (angle in the cylinder's plane)
-### [This is identical to the spherical coordinate's 'phi' angle.]
 def _cyl_theta(field, data):
-    return data['sph_phi']
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = obtain_rvec(data)
+
+    return get_cyl_theta(coords, normal)
 
 add_field("cyl_theta", function=_cyl_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
 
-
 ### The old field DiskAngle is the same as the spherical coordinates'
 ### 'theta' angle. I'm keeping DiskAngle for backwards compatibility.
 def _DiskAngle(field, data):
@@ -392,6 +342,54 @@
                       ValidateParameter("normal")],
           units=r"AU", display_field=False)
 
+def _cyl_RadialVelocity(field, data):
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)
+
+    theta = data['cyl_theta']
+
+    return get_cyl_r_component(velocities, theta, normal)
+
+def _cyl_RadialVelocityABS(field, data):
+    return np.abs(_cyl_RadialVelocity(field, data))
+def _Convert_cyl_RadialVelocityKMS(data):
+    return km_per_cm
+add_field("cyl_RadialVelocity", function=_cyl_RadialVelocity,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityABS", function=_cyl_RadialVelocityABS,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityKMS", function=_cyl_RadialVelocity,
+          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityKMSABS", function=_cyl_RadialVelocityABS,
+          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+
+def _cyl_TangentialVelocity(field, data):
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)
+    theta = data['cyl_theta']
+
+    return get_cyl_theta_component(velocities, theta, normal)
+
+def _cyl_TangentialVelocityABS(field, data):
+    return np.abs(_cyl_TangentialVelocity(field, data))
+def _Convert_cyl_TangentialVelocityKMS(data):
+    return km_per_cm
+add_field("cyl_TangentialVelocity", function=_cyl_TangentialVelocity,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityABS", function=_cyl_TangentialVelocityABS,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityKMS", function=_cyl_TangentialVelocity,
+          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityKMSABS", function=_cyl_TangentialVelocityABS,
+          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
 
 def _DynamicalTime(field, data):
     """
@@ -450,7 +448,7 @@
 
 # This is rho_total / rho_cr(z).
 def _Convert_Overdensity(data):
-    return 1 / (rho_crit_now * data.pf.hubble_constant**2 * 
+    return 1.0 / (rho_crit_now * data.pf.hubble_constant**2 * 
                 (1+data.pf.current_redshift)**3)
 add_field("Overdensity",function=_Matter_Density,
           convert_function=_Convert_Overdensity, units=r"")
@@ -470,8 +468,8 @@
     else:
         omega_baryon_now = 0.0441
     return data['Density'] / (omega_baryon_now * rho_crit_now * 
-                              (data.pf['CosmologyHubbleConstantNow']**2) * 
-                              ((1+data.pf['CosmologyCurrentRedshift'])**3))
+                              (data.pf.hubble_constant**2) * 
+                              ((1+data.pf.current_redshift)**3))
 add_field("Baryon_Overdensity", function=_Baryon_Overdensity, 
           units=r"")
 
@@ -640,13 +638,7 @@
           take_log=False, display_field=False)
 
 def obtain_velocities(data):
-    if data.has_field_parameter("bulk_velocity"):
-        bv = data.get_field_parameter("bulk_velocity")
-    else: bv = np.zeros(3, dtype='float64')
-    xv = data["x-velocity"] - bv[0]
-    yv = data["y-velocity"] - bv[1]
-    zv = data["z-velocity"] - bv[2]
-    return xv, yv, zv
+    return obtain_rv_vec(data)
 
 def _convertSpecificAngularMomentum(data):
     return data.convert("cm")
@@ -711,7 +703,7 @@
 #          convert_function=_convertSpecificAngularMomentum, vector_field=True,
 #          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter('center')])
 def _convertSpecificAngularMomentumKMSMPC(data):
-    return data.convert("mpc")/1e5
+    return km_per_cm*data.convert("mpc")
 #add_field("ParticleSpecificAngularMomentumKMSMPC",
 #          function=_ParticleSpecificAngularMomentum, particle_type=True,
 #          convert_function=_convertSpecificAngularMomentumKMSMPC, vector_field=True,
@@ -883,33 +875,32 @@
           display_name = "Radius (code)")
 
 def _RadialVelocity(field, data):
-    center = data.get_field_parameter("center")
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = np.zeros(3)
-    new_field = ( (data['x']-center[0])*(data["x-velocity"]-bulk_velocity[0])
-                + (data['y']-center[1])*(data["y-velocity"]-bulk_velocity[1])
-                + (data['z']-center[2])*(data["z-velocity"]-bulk_velocity[2])
-                )/data["RadiusCode"]
-    if np.any(np.isnan(new_field)): # to fix center = point
-        new_field[np.isnan(new_field)] = 0.0
-    return new_field
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)    
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_r_component(velocities, theta, phi, normal)
+
 def _RadialVelocityABS(field, data):
     return np.abs(_RadialVelocity(field, data))
 def _ConvertRadialVelocityKMS(data):
-    return 1e-5
+    return km_per_cm
 add_field("RadialVelocity", function=_RadialVelocity,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          units=r"\rm{cm}/\rm{s}")
 add_field("RadialVelocityABS", function=_RadialVelocityABS,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          units=r"\rm{cm}/\rm{s}")
 add_field("RadialVelocityKMS", function=_RadialVelocity,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
 add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
+
+def _TangentialVelocity(field, data):
+    return np.sqrt(data["VelocityMagnitude"]**2.0
+                 - data["RadialVelocity"]**2.0)
+add_field("TangentialVelocity", 
+          function=_TangentialVelocity,
+          take_log=False, units=r"\rm{cm}/\rm{s}")
 
 def _CuttingPlaneVelocityX(field, data):
     x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
@@ -1026,6 +1017,47 @@
           display_name=r"\rm{Magnetic}\/\rm{Energy}",
           units="\rm{ergs}\/\rm{cm}^{-3}")
 
+def _BPoloidal(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_theta_component(Bfields, theta, phi, normal)
+
+add_field("BPoloidal", function=_BPoloidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
+def _BToroidal(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    phi   = data['sph_phi']
+
+    return get_sph_phi_component(Bfields, phi, normal)
+
+add_field("BToroidal", function=_BToroidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
+def _BRadial(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_r_component(Bfields, theta, phi, normal)
+
+add_field("BRadial", function=_BPoloidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
 def _VorticitySquared(field, data):
     mylog.debug("Generating vorticity on %s", data)
     # We need to set up stencils
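
The coordinate-field rewrite above replaces the per-field tile/cross-product
arithmetic with yt.utilities.math_utils helpers operating on the
(3, ...)-shaped vectors from obtain_rvec/obtain_rv_vec.  For the common
normal = z-hat case, the spherical radial component used by _RadialVelocity
reduces to the textbook projection; a sketch (the helper name is illustrative):

    import numpy as np

    def sph_r_component_zhat(vel, theta, phi):
        # Project (3, N) vectors onto the radial unit vector
        # r_hat = (sin t cos p, sin t sin p, cos t) for normal == z-hat.
        return (vel[0] * np.sin(theta) * np.cos(phi) +
                vel[1] * np.sin(theta) * np.sin(phi) +
                vel[2] * np.cos(theta))

    vel = np.array([[1.0], [0.0], [0.0]])        # pure +x motion
    # On the +x axis (theta = pi/2, phi = 0) that motion is all radial.
    assert abs(sph_r_component_zhat(vel, np.pi/2, 0.0)[0] - 1.0) < 1e-15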


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -353,7 +353,8 @@
             psize = get_psize(np.array(data[key].shape), nprocs)
             grid_left_edges, grid_right_edges, temp[key] = \
                 decompose_array(data[key], psize, bbox)
-            grid_dimensions = np.array([grid.shape for grid in temp[key]])
+            grid_dimensions = np.array([grid.shape for grid in temp[key]],
+                                       dtype="int32")
         for gid in range(nprocs):
             new_data[gid] = {}
             for key in temp.keys():
@@ -364,7 +365,7 @@
         sfh.update({0:data})
         grid_left_edges = domain_left_edge
         grid_right_edges = domain_right_edge
-        grid_dimensions = domain_dimensions.reshape(nprocs,3)
+        grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
 
     handler = StreamHandler(
         grid_left_edges,


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -310,7 +310,8 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       ytcfg.getboolean("yt", "ipython_notebook"):
+       ytcfg.getboolean("yt", "ipython_notebook") or \
+       ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):
         from yt.gui.reason.extdirect_repl import ExtProgressBar




diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -68,9 +68,12 @@
 def evaluate_domain_decomposition(n_d, pieces, ldom):
     """ Evaluate longest to shortest edge ratio
        BEWARE: lots of magic here """
-    ideal_bsize = 3.0 * (pieces * np.product(n_d) ** 2) ** (1.0 / 3.0)
-    bsize = int(np.sum(
-        ldom / np.array(n_d, dtype=np.float64) * np.product(n_d)))
+    eff_dim = (n_d > 1).sum()
+    ideal_bsize = eff_dim * (pieces * np.product(n_d) ** (eff_dim - 1)
+                             ) ** (1.0 / eff_dim)
+    mask = np.where(n_d > 1)
+    nd_arr = np.array(n_d, dtype=np.float64)[mask]
+    bsize = int(np.sum(ldom[mask] / nd_arr * np.product(nd_arr)))
     load_balance = float(np.product(n_d)) / \
         (float(pieces) * np.product((n_d - 1) / ldom + 1))
 
@@ -134,23 +137,15 @@
 
 
 def split_array(tab, psize):
-    """ Split array into px*py*pz subarrays using internal numpy routine. """
-    temp = [np.array_split(array, psize[1], axis=1)
-            for array in np.array_split(tab, psize[2], axis=2)]
-    temp = [item for sublist in temp for item in sublist]
-    temp = [np.array_split(array, psize[0], axis=0) for array in temp]
-    temp = [item for sublist in temp for item in sublist]
-    return temp
-
-
-if __name__ == "__main__":
-
-    NPROC = 12
-    ARRAY = np.zeros((128, 128, 129))
-    BBOX = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
-
-    PROCS = get_psize(np.array(ARRAY.shape), NPROC)
-    LE, RE, DATA = decompose_array(ARRAY, PROCS, BBOX)
-
-    for idx in range(NPROC):
-        print LE[idx, :], RE[idx, :], DATA[idx].shape
+    """ Split array into px*py*pz subarrays. """
+    n_d = np.array(tab.shape, dtype=np.int64)
+    slices = []
+    for i in range(psize[0]):
+        for j in range(psize[1]):
+            for k in range(psize[2]):
+                piece = np.array((i, j, k), dtype=np.int64)
+                lei = n_d * piece / psize
+                rei = n_d * (piece + np.ones(3, dtype=np.int64)) / psize
+                slices.append(np.s_[lei[0]:rei[0],
+                                    lei[1]:rei[1],
+                                    lei[2]:rei[2]])
+    return [tab[slc] for slc in slices]
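
The rewritten split_array derives each block's integer edges directly from its
piece index, so the px*py*pz pieces tile the array exactly even when the
dimensions do not divide evenly.  An equivalent standalone sketch (Python 2's
integer '/' is spelled '//' for clarity):

    import numpy as np

    def split_array(tab, psize):
        # Block-decompose tab into psize[0]*psize[1]*psize[2] pieces
        # using the same integer edge arithmetic as above.
        n_d = np.array(tab.shape, dtype=np.int64)
        pieces = []
        for i in range(psize[0]):
            for j in range(psize[1]):
                for k in range(psize[2]):
                    p = np.array((i, j, k), dtype=np.int64)
                    lei = n_d * p // psize
                    rei = n_d * (p + 1) // psize
                    pieces.append(tab[lei[0]:rei[0],
                                      lei[1]:rei[1],
                                      lei[2]:rei[2]])
        return pieces

    chunks = split_array(np.zeros((8, 8, 9)), np.array([2, 2, 2]))
    assert len(chunks) == 8 and chunks[0].shape == (4, 4, 4)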


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -146,3 +146,11 @@
     def __str__(self):
         return "You must create an API key before uploading.  See " + \
                "https://data.yt-project.org/getting_started.html"
+
+class YTNoFilenamesMatchPattern(YTException):
+    def __init__(self, pattern):
+        self.pattern = pattern
+
+    def __str__(self):
+        return "No filenames were found to match the pattern: " + \
+               "'%s'" % (self.pattern)


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/utilities/kdtree/__init__.py
--- a/yt/utilities/kdtree/__init__.py
+++ b/yt/utilities/kdtree/__init__.py
@@ -1,1 +0,0 @@
-from fKDpy import *


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/utilities/kdtree/api.py
--- /dev/null
+++ b/yt/utilities/kdtree/api.py
@@ -0,0 +1,9 @@
+from fKDpy import \
+    chainHOP_tags_dens, \
+    create_tree, \
+    fKD, \
+    find_nn_nearest_neighbors, \
+    free_tree, \
+    find_chunk_nearest_neighbors
+
+


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/utilities/kdtree/test.py
--- a/yt/utilities/kdtree/test.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from Forthon import *
-from fKDpy import *
-import numpy,random
-
-n = 32768
-
-
-fKD.tags = fzeros((64),'i')
-fKD.dist = fzeros((64),'d')
-fKD.pos = fzeros((3,n),'d')
-fKD.nn = 64
-fKD.nparts = n
-fKD.sort = True
-fKD.rearrange = True
-fKD.qv = numpy.array([16./32, 16./32, 16./32])
-
-fp = open('parts.txt','r')
-xpos = []
-ypos = []
-zpos = []
-line = fp.readline()
-while line:
-    line = line.split()
-    xpos.append(float(line[0]))
-    ypos.append(float(line[1]))
-    zpos.append(float(line[2]))
-    line= fp.readline()
-
-fp.close()
-
-
-for k in range(32):
-    for j in range(32):
-        for i in range(32):
-            fKD.pos[0][i + j*32 + k*1024] = float(i)/32 + 1./64 + 0.0001*random.random()
-            fKD.pos[1][i + j*32 + k*1024] = float(j)/32 + 1./64 + 0.0001*random.random()
-            fKD.pos[2][i + j*32 + k*1024] = float(k)/32 + 1./64 + 0.0001*random.random()
-
-            
-
-#print fKD.pos[0][0],fKD.pos[1][0],fKD.pos[2][0]
-
-create_tree()
-
-
-find_nn_nearest_neighbors()
-
-#print 'next'
-
-#fKD.qv = numpy.array([0., 0., 0.])
-
-#find_nn_nearest_neighbors()
-
-
-#print (fKD.tags - 1)
-#print fKD.dist
-
-free_tree()


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -338,3 +338,47 @@
                     rg[2,i,j,k] = zg[i,j,k] - c[2]
         return rg
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def obtain_rv_vec(data):
+    # This is just to let the pointers exist and whatnot.  We can't cdef them
+    # inside conditionals.
+    cdef np.ndarray[np.float64_t, ndim=1] vxf
+    cdef np.ndarray[np.float64_t, ndim=1] vyf
+    cdef np.ndarray[np.float64_t, ndim=1] vzf
+    cdef np.ndarray[np.float64_t, ndim=2] rvf
+    cdef np.ndarray[np.float64_t, ndim=3] vxg
+    cdef np.ndarray[np.float64_t, ndim=3] vyg
+    cdef np.ndarray[np.float64_t, ndim=3] vzg
+    cdef np.ndarray[np.float64_t, ndim=4] rvg
+    cdef np.float64_t bv[3]
+    cdef int i, j, k
+    bulk_velocity = data.get_field_parameter("bulk_velocity")
+    if bulk_velocity is None:
+        bulk_velocity = np.zeros(3)
+    bv[0] = bulk_velocity[0]; bv[1] = bulk_velocity[1]; bv[2] = bulk_velocity[2]
+    if len(data['x-velocity'].shape) == 1:
+        # One dimensional data
+        vxf = data['x-velocity'].astype("float64")
+        vyf = data['y-velocity'].astype("float64")
+        vzf = data['z-velocity'].astype("float64")
+        rvf = np.empty((3, vxf.shape[0]), 'float64')
+        for i in range(vxf.shape[0]):
+            rvf[0, i] = vxf[i] - bv[0]
+            rvf[1, i] = vyf[i] - bv[1]
+            rvf[2, i] = vzf[i] - bv[2]
+        return rvf
+    else:
+        # Three dimensional data
+        vxg = data['x-velocity'].astype("float64")
+        vyg = data['y-velocity'].astype("float64")
+        vzg = data['z-velocity'].astype("float64")
+        rvg = np.empty((3, vxg.shape[0], vxg.shape[1], vxg.shape[2]), 'float64')
+        for i in range(vxg.shape[0]):
+            for j in range(vxg.shape[1]):
+                for k in range(vxg.shape[2]):
+                    rvg[0,i,j,k] = vxg[i,j,k] - bv[0]
+                    rvg[1,i,j,k] = vyg[i,j,k] - bv[1]
+                    rvg[2,i,j,k] = vzg[i,j,k] - bv[2]
+        return rvg
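
A sketch of the intended call pattern for the new obtain_rv_vec (the dataset path is hypothetical; the lib-level import mirrors the test added below, and bulk_velocity is an ordinary field parameter):

    import numpy as np
    from yt.mods import load
    from yt.utilities.lib import obtain_rv_vec

    pf = load("DD0010/moving7_0010")   # hypothetical dataset
    sp = pf.h.sphere([0.5, 0.5, 0.5], 0.25)
    sp.set_field_parameter("bulk_velocity", np.array([1e5, 0.0, 0.0]))
    vels = obtain_rv_vec(sp)   # shape (3, N): velocities minus the bulk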


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -233,49 +233,6 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def obtain_rvec(data):
-    # This is just to let the pointers exist and whatnot.  We can't cdef them
-    # inside conditionals.
-    cdef np.ndarray[np.float64_t, ndim=1] xf
-    cdef np.ndarray[np.float64_t, ndim=1] yf
-    cdef np.ndarray[np.float64_t, ndim=1] zf
-    cdef np.ndarray[np.float64_t, ndim=2] rf
-    cdef np.ndarray[np.float64_t, ndim=3] xg
-    cdef np.ndarray[np.float64_t, ndim=3] yg
-    cdef np.ndarray[np.float64_t, ndim=3] zg
-    cdef np.ndarray[np.float64_t, ndim=4] rg
-    cdef np.float64_t c[3]
-    cdef int i, j, k
-    center = data.get_field_parameter("center")
-    c[0] = center[0]; c[1] = center[1]; c[2] = center[2]
-    if len(data['x'].shape) == 1:
-        # One dimensional data
-        xf = data['x']
-        yf = data['y']
-        zf = data['z']
-        rf = np.empty((3, xf.shape[0]), 'float64')
-        for i in range(xf.shape[0]):
-            rf[0, i] = xf[i] - c[0]
-            rf[1, i] = yf[i] - c[1]
-            rf[2, i] = zf[i] - c[2]
-        return rf
-    else:
-        # Three dimensional data
-        xg = data['x']
-        yg = data['y']
-        zg = data['z']
-        rg = np.empty((3, xg.shape[0], xg.shape[1], xg.shape[2]), 'float64')
-        for i in range(xg.shape[0]):
-            for j in range(xg.shape[1]):
-                for k in range(xg.shape[2]):
-                    rg[0,i,j,k] = xg[i,j,k] - c[0]
-                    rg[1,i,j,k] = yg[i,j,k] - c[1]
-                    rg[2,i,j,k] = zg[i,j,k] - c[2]
-        return rg
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
 def kdtree_get_choices(np.ndarray[np.float64_t, ndim=3] data,
                        np.ndarray[np.float64_t, ndim=1] l_corner,
                        np.ndarray[np.float64_t, ndim=1] r_corner):


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/utilities/lib/tests/test_geometry_utils.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_geometry_utils.py
@@ -0,0 +1,30 @@
+from yt.testing import *
+from yt.utilities.lib import obtain_rvec, obtain_rv_vec
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_obtain_rvec():
+    pf = fake_random_pf(64, nprocs=8, fields=_fields, 
+           negative = [False, True, True, True])
+    
+    dd = pf.h.sphere((0.5,0.5,0.5), 0.2)
+
+    coords = obtain_rvec(dd)
+
+    r = np.sqrt(np.sum(coords*coords,axis=0))
+
+    assert_array_less(r.max(), 0.2)
+
+    assert_array_less(0.0, r.min())
+
+def test_obtain_rv_vec():
+    pf = fake_random_pf(64, nprocs=8, fields=_fields, 
+           negative = [False, True, True, True])
+
+    dd = pf.h.all_data()
+
+    vels = obtain_rv_vec(dd)
+
+    assert_array_equal(vels[0,:], dd['x-velocity'])
+    assert_array_equal(vels[1,:], dd['y-velocity'])
+    assert_array_equal(vels[2,:], dd['z-velocity'])


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -674,3 +674,191 @@
                   [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
     
     return R
+
+def get_ortho_basis(normal):
+    xprime = np.cross([0.0,1.0,0.0],normal)
+    if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
+    yprime = np.cross(normal,xprime)
+    zprime = normal
+    return (xprime, yprime, zprime)
+
+def get_sph_r(coords):
+    # The spherical coordinates radius is simply the magnitude of the
+    # coordinate vector.
+
+    return np.sqrt(np.sum(coords**2, axis=0))
+
+def resize_vector(vector,vector_array):
+    if len(vector_array.shape) == 4:
+        res_vector = np.resize(vector,(3,1,1,1))
+    else:
+        res_vector = np.resize(vector,(3,1))
+    return res_vector
+
+def get_sph_theta(coords, normal):
+    # The angle (theta) with respect to the normal (J), is the arccos
+    # of the dot product of the normal with the normalized coordinate
+    # vector.
+    
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    
+    J = np.tile(res_normal,tile_shape)
+
+    JdotCoords = np.sum(J*coords,axis=0)
+    
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=0)) )
+
+def get_sph_phi(coords, normal):
+    # We have freedom with respect to what axis (xprime) to define
+    # the disk angle. Here I've chosen to use the axis that is
+    # perpendicular to the normal and the y-axis. When normal ==
+    # y-hat, then set xprime = z-hat. With this definition, when
+    # normal == z-hat (as is typical), then xprime == x-hat.
+    #
+    # The angle is then given by the arctan of the ratio of the
+    # yprime-component and the xprime-component of the coordinate 
+    # vector.
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, coords)
+    res_yprime = resize_vector(yprime, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    Px = np.sum(Jx*coords,axis=0)
+    Py = np.sum(Jy*coords,axis=0)
+    
+    return np.arctan2(Py,Px)
+
+def get_cyl_r(coords, normal):
+    # The cross product of the normal (J) with a coordinate vector
+    # gives a vector of magnitude equal to the cylindrical radius.
+
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
+    
+    JcrossCoords = np.cross(J, coords, axisa=0, axisb=0, axisc=0)
+    return np.sqrt(np.sum(JcrossCoords**2, axis=0))
+
+def get_cyl_z(coords, normal):
+    # The dot product of the normal (J) with the coordinate vector 
+    # gives the cylindrical height.
+
+    res_normal = resize_vector(normal, coords)
+    
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
+
+    return np.sum(J*coords, axis=0)  
+
+def get_cyl_theta(coords, normal):
+    # This is identical to the spherical phi component
+
+    return get_sph_phi(coords, normal)
+
+
+def get_cyl_r_component(vectors, theta, normal):
+    # The r of a vector is the vector dotted with rhat
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    rhat = Jx*np.cos(theta) + Jy*np.sin(theta)
+
+    return np.sum(vectors*rhat,axis=0)
+
+def get_cyl_theta_component(vectors, theta, normal):
+    # The theta component of a vector is the vector dotted with thetahat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    thetahat = -Jx*np.sin(theta) + Jy*np.cos(theta)
+
+    return np.sum(vectors*thetahat, axis=0)
+
+def get_cyl_z_component(vectors, normal):
+    # The z component of a vector is the vector dotted with zhat
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    zhat = np.tile(res_zprime, tile_shape)
+
+    return np.sum(vectors*zhat, axis=0)
+
+def get_sph_r_component(vectors, theta, phi, normal):
+    # The r component of a vector is the vector dotted with rhat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
+
+    rhat = Jx*np.sin(theta)*np.cos(phi) + \
+           Jy*np.sin(theta)*np.sin(phi) + \
+           Jz*np.cos(theta)
+
+    return np.sum(vectors*rhat, axis=0)
+
+def get_sph_phi_component(vectors, phi, normal):
+    # The phi component of a vector is the vector dotted with phihat
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    phihat = -Jx*np.sin(phi) + Jy*np.cos(phi)
+
+    return np.sum(vectors*phihat, axis=0)
+
+def get_sph_theta_component(vectors, theta, phi, normal):
+    # The theta component of a vector is the vector dotted with thetahat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
+    
+    thetahat = Jx*np.cos(theta)*np.cos(phi) + \
+               Jy*np.cos(theta)*np.sin(phi) - \
+               Jz*np.sin(theta)
+
+    return np.sum(vectors*thetahat, axis=0)
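
A worked example of the new spherical helpers (a sketch; normal is the usual z-axis and coords is a (3, N) array, matching the convention in the tests below):

    import numpy as np
    from yt.utilities.math_utils import get_sph_r, get_sph_theta, get_sph_phi

    coords = np.array([[0.0, 1.0],
                       [0.0, 1.0],
                       [1.0, 1.0]])   # two points: (0,0,1) and (1,1,1)
    normal = [0, 0, 1]

    print get_sph_r(coords)               # ~[ 1.         1.7320508]
    print get_sph_theta(coords, normal)   # ~[ 0.         0.9553166]
    print get_sph_phi(coords, normal)     # ~[ 0.         0.7853982]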


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/utilities/tests/test_coordinate_conversions.py
--- /dev/null
+++ b/yt/utilities/tests/test_coordinate_conversions.py
@@ -0,0 +1,125 @@
+from yt.testing import *
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi
+
+# Randomly generated coordinates in the domain [[-1,1],[-1,1],[-1,1]]
+coords = np.array([[-0.41503037, -0.22102472, -0.55774212],
+                   [ 0.73828247, -0.17913899,  0.64076921],
+                   [ 0.08922066, -0.94254844, -0.61774511],
+                   [ 0.10173242, -0.95789145,  0.16294352],
+                   [ 0.73186508, -0.3109153 ,  0.75728738],
+                   [ 0.8757989 , -0.41475119, -0.57039201],
+                   [ 0.58040762,  0.81969082,  0.46759728],
+                   [-0.89983356, -0.9853683 , -0.38355343]]).T
+
+def test_spherical_coordinate_conversion():
+    normal = [0, 0, 1]
+    real_r =     [ 0.72950559,  0.99384957,  1.13047198,  0.97696269,  
+                   1.09807968,  1.12445067,  1.10788685,  1.38843954]
+    real_theta = [ 2.44113629,  0.87012028,  2.14891444,  1.4032274 ,  
+                   0.80979483,  2.10280198,  1.13507735,  1.85068416]
+    real_phi =   [-2.65224483, -0.23804243, -1.47641858, -1.46498842, 
+                  -0.40172325, -0.4422801 ,  0.95466734, -2.31085392]
+
+    calc_r = get_sph_r(coords)
+    calc_theta = get_sph_theta(coords, normal)
+    calc_phi = get_sph_phi(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_phi, real_phi)
+
+    normal = [1, 0, 0]
+    real_theta = [ 2.17598842,  0.73347681,  1.49179079,  1.46647589,  
+                   0.8412984 ,  0.67793705,  1.0193883 ,  2.27586987]
+    real_phi =   [-0.37729951, -2.86898397, -0.99063518, -1.73928995, 
+                   -2.75201227,-0.62870527,  2.08920872, -1.19959244]
+
+    calc_theta = get_sph_theta(coords, normal)
+    calc_phi = get_sph_phi(coords, normal)
+    
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_phi, real_phi)
+
+def test_cylindrical_coordinate_conversion():
+    normal = [0, 0, 1]
+    real_r =     [ 0.47021498,  0.75970506,  0.94676179,  0.96327853,  
+                   0.79516968,  0.96904193,  1.00437346,  1.3344104 ]    
+    real_theta = [-2.65224483, -0.23804243, -1.47641858, -1.46498842, 
+                  -0.40172325, -0.4422801 ,  0.95466734, -2.31085392]
+    real_z =     [-0.55774212,  0.64076921, -0.61774511,  0.16294352,
+                   0.75728738, -0.57039201,  0.46759728, -0.38355343]
+
+    calc_r = get_cyl_r(coords, normal)
+    calc_theta = get_cyl_theta(coords, normal)
+    calc_z = get_cyl_z(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_z, real_z)
+
+    normal = [1, 0, 0]
+    real_r =     [ 0.59994016,  0.66533898,  1.12694569,  0.97165149,
+                   0.81862843,  0.70524152,  0.94368441,  1.05738542]
+    real_theta = [-0.37729951, -2.86898397, -0.99063518, -1.73928995, 
+                  -2.75201227, -0.62870527,  2.08920872, -1.19959244]
+    real_z =     [-0.41503037,  0.73828247,  0.08922066,  0.10173242,
+                   0.73186508,  0.8757989 ,  0.58040762, -0.89983356]
+
+    calc_r = get_cyl_r(coords, normal)
+    calc_theta = get_cyl_theta(coords, normal)
+    calc_z = get_cyl_z(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_z, real_z)
+
+def test_spherical_coordinate_projections():
+    normal = [0, 0, 1]
+    theta = get_sph_theta(coords, normal)
+    phi = get_sph_phi(coords, normal)
+    zero = np.tile(0,coords.shape[1])
+
+    # Purely radial field
+    vecs = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+
+    # Purely toroidal field
+    vecs = np.array([-np.sin(phi), np.cos(phi), zero])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
+
+    # Purely poloidal field
+    vecs = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -np.sin(theta)])
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
+
+def test_cylindrical_coordinate_projections():
+    normal = [0, 0, 1]
+    theta = get_cyl_theta(coords, normal)
+    z = get_cyl_z(coords, normal)
+    zero = np.tile(0, coords.shape[1])
+
+    # Purely radial field
+    vecs = np.array([np.cos(theta), np.sin(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
+
+    # Purely toroidal field
+    vecs = np.array([-np.sin(theta), np.cos(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))
+
+    # Purely z field
+    vecs = np.array([zero, zero, z])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/utilities/tests/test_decompose.py
--- /dev/null
+++ b/yt/utilities/tests/test_decompose.py
@@ -0,0 +1,96 @@
+"""
+Test suite for cartesian domain decomposition.
+
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import assert_array_equal, assert_almost_equal
+import numpy as np
+import yt.utilities.decompose as dec
+
+
+def setup():
+    pass
+
+
+def test_psize_2d():
+    procs = dec.get_psize(np.array([5, 1, 7]), 6)
+    assert_array_equal(procs, np.array([3, 1, 2]))
+    procs = dec.get_psize(np.array([1, 7, 5]), 6)
+    assert_array_equal(procs, np.array([1, 2, 3]))
+    procs = dec.get_psize(np.array([7, 5, 1]), 6)
+    assert_array_equal(procs, np.array([2, 3, 1]))
+
+
+def test_psize_3d():
+    procs = dec.get_psize(np.array([33, 35, 37]), 12)
+    assert_array_equal(procs, np.array([3, 2, 2]))
+
+
+def test_decomposition_2d():
+    array = np.ones((7, 5, 1))
+    bbox = np.array([[-0.7, 0.0], [1.5, 2.0], [0.0, 0.7]])
+    ledge, redge, data = dec.decompose_array(array, np.array([2, 3, 1]), bbox)
+
+    assert_array_equal(data[1].shape, np.array([3, 2, 1]))
+
+    gold_le = np.array([
+                       [-0.7, 1.5, 0.0], [-0.7, 1.6, 0.0],
+                       [-0.7, 1.8, 0.0], [-0.4, 1.5, 0.0],
+                       [-0.4, 1.6, 0.0], [-0.4, 1.8, 0.0]
+                       ])
+    assert_almost_equal(ledge, gold_le, 8)
+
+    gold_re = np.array(
+        [[-0.4, 1.6, 0.7], [-0.4, 1.8, 0.7],
+         [-0.4, 2.0, 0.7], [0.0, 1.6, 0.7],
+         [0.0, 1.8, 0.7], [0.0, 2.0, 0.7]]
+    )
+    assert_almost_equal(redge, gold_re, 8)
+
+
+def test_decomposition_3d():
+    array = np.ones((33, 35, 37))
+    bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+
+    ledge, redge, data = dec.decompose_array(array, np.array([3, 2, 2]), bbox)
+    assert_array_equal(data[0].shape, np.array([11, 17, 18]))
+
+    gold_le = np.array(
+        [[0.00000, -1.50000, 1.00000], [0.00000, -1.50000, 1.72973],
+         [0.00000, -0.04286, 1.00000], [0.00000, -0.04286, 1.72973],
+         [0.33333, -1.50000, 1.00000], [0.33333, -1.50000, 1.72973],
+         [0.33333, -0.04286, 1.00000], [0.33333, -0.04286, 1.72973],
+         [0.66667, -1.50000, 1.00000], [0.66667, -1.50000, 1.72973],
+         [0.66667, -0.04286, 1.00000], [0.66667, -0.04286, 1.72973]]
+    )
+    assert_almost_equal(ledge, gold_le, 5)
+
+    gold_re = np.array(
+        [[0.33333, -0.04286, 1.72973], [0.33333, -0.04286, 2.50000],
+         [0.33333, 1.50000, 1.72973], [0.33333, 1.50000, 2.50000],
+         [0.66667, -0.04286, 1.72973], [0.66667, -0.04286, 2.50000],
+         [0.66667, 1.50000, 1.72973], [0.66667, 1.50000, 2.50000],
+         [1.00000, -0.04286, 1.72973], [1.00000, -0.04286, 2.50000],
+         [1.00000, 1.50000, 1.72973], [1.00000, 1.50000, 2.50000]]
+    )
+    assert_almost_equal(redge, gold_re, 5)
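
For interactive use, the __main__ demo deleted from decompose.py above still sketches the top-level API these tests now cover:

    import numpy as np
    import yt.utilities.decompose as dec

    array = np.zeros((128, 128, 129))
    bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
    procs = dec.get_psize(np.array(array.shape), 12)
    ledge, redge, data = dec.decompose_array(array, procs, bbox)
    for idx in range(12):
        print ledge[idx, :], redge[idx, :], data[idx].shape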


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/utilities/tests/test_kdtrees.py
--- a/yt/utilities/tests/test_kdtrees.py
+++ b/yt/utilities/tests/test_kdtrees.py
@@ -26,10 +26,7 @@
 from yt.testing import *
 
 try:
-    from yt.utilities.kdtree import \
-        chainHOP_tags_dens, \
-        create_tree, fKD, find_nn_nearest_neighbors, \
-        free_tree, find_chunk_nearest_neighbors
+    from yt.utilities.kdtree.api import *
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 
@@ -39,10 +36,14 @@
     pass
 
 def test_fortran_tree():
-    # This test makes sure that the fortran kdtree is finding the correct
-    # nearest neighbors.
+    r"""This test makes sure that the fortran kdtree is finding the correct
+    nearest neighbors.
+    """
     # Four points.
-    fKD.pos = np.empty((3, 4), dtype='float64', order='F')
+    try:
+        fKD.pos = np.empty((3, 4), dtype='float64', order='F')
+    except NameError:
+        return
     # Make four points by hand that, in particular, will allow us to test
     # the periodicity of the kdtree.
     points = np.array([0.01, 0.5, 0.98, 0.99])
@@ -70,8 +71,9 @@
     assert_array_equal(fKD.tags, tags)
 
 def test_cython_tree():
-    # This test makes sure that the cython kdtree is finding the correct
-    # nearest neighbors.
+    r"""This test makes sure that the cython kdtree is finding the correct
+    nearest neighbors.
+    """
     # Four points.
     pos = np.empty((4, 3), dtype='float64')
     # Make four points by hand that, in particular, will allow us to test


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -213,7 +213,7 @@
 class ContourCallback(PlotCallback):
     _type_name = "contour"
     def __init__(self, field, ncont=5, factor=4, clim=None,
-                 plot_args = None):
+                 plot_args = None, label = False, label_args = None):
         """
         annotate_contour(self, field, ncont=5, factor=4, take_log=False, clim=None,
                          plot_args = None):
@@ -232,6 +232,10 @@
         self.clim = clim
         if plot_args is None: plot_args = {'colors':'k'}
         self.plot_args = plot_args
+        self.label = label
+        if label_args is None:
+            label_args = {}
+        self.label_args = label_args
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -292,10 +296,14 @@
         if self.clim is not None: 
             self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
         
-        plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
+        cset = plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
+        
+        if self.label:
+            plot._axes.clabel(cset, **self.label_args)
+        
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
@@ -366,39 +374,30 @@
 
 class StreamlineCallback(PlotCallback):
     _type_name = "streamlines"
-    def __init__(self, field_x, field_y, factor=6.0, nx=16, ny=16,
-                 xstart=(0,1), ystart=(0,1), nsample=256,
-                 start_at_xedge=False, start_at_yedge=False,
-                 plot_args=None):
+    def __init__(self, field_x, field_y, factor = 16,
+                 density = 1, arrowsize = 1, arrowstyle = None,
+                 color = None, normalize = False):
         """
-        annotate_streamlines(field_x, field_y, factor=6.0, nx=16, ny=16,
-                             xstart=(0,1), ystart=(0,1), nsample=256,
-                             start_at_xedge=False, start_at_yedge=False,
-                             plot_args=None):
+        annotate_streamlines(field_x, field_y, factor = 16, density = 1,
+                             arrowsize = 1, arrowstyle = None,
+                             color = None, normalize = False):
 
         Add streamlines to any plot, using the *field_x* and *field_y*
-        from the associated data, using *nx* and *ny* starting points
-        that are bounded by *xstart* and *ystart*.  To begin
-        streamlines from the left edge of the plot, set
-        *start_at_xedge* to True; for the bottom edge, use
-        *start_at_yedge*.  A line with the qmean vector magnitude will
-        cover 1.0/*factor* of the image.
+        from the associated data, skipping every *factor* data points as
+        'quiver' does. *density* controls how closely packed the streamlines are.
         """
         PlotCallback.__init__(self)
         self.field_x = field_x
         self.field_y = field_y
-        self.xstart = xstart
-        self.ystart = ystart
-        self.nsample = nsample
+        self.bv_x = self.bv_y = 0
         self.factor = factor
-        if start_at_xedge:
-            self.data_size = (1,ny)
-        elif start_at_yedge:
-            self.data_size = (nx,1)
-        else:
-            self.data_size = (nx,ny)
-        if plot_args is None: plot_args = {'color':'k', 'linestyle':'-'}
-        self.plot_args = plot_args
+        self.dens = density
+        self.arrowsize = arrowsize
+        if arrowstyle is None : arrowstyle='-|>'
+        self.arrowstyle = arrowstyle
+        if color is None : color = "#000000"
+        self.color = color
+        self.normalize = normalize
         
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -406,43 +405,31 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0]
-        ny = plot.image._A.shape[1]
+        nx = plot.image._A.shape[0] / self.factor
+        ny = plot.image._A.shape[1] / self.factor
         pixX = _MPL.Pixelize(plot.data['px'],
                              plot.data['py'],
                              plot.data['pdx'],
                              plot.data['pdy'],
-                             plot.data[self.field_x],
+                             plot.data[self.field_x] - self.bv_x,
                              int(nx), int(ny),
-                           (x0, x1, y0, y1),)
+                           (x0, x1, y0, y1),).transpose()
         pixY = _MPL.Pixelize(plot.data['px'],
                              plot.data['py'],
                              plot.data['pdx'],
                              plot.data['pdy'],
-                             plot.data[self.field_y],
+                             plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
-                           (x0, x1, y0, y1),)
-        r0 = np.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
-                      self.ystart[0]*ny:self.ystart[1]*ny:self.data_size[1]*1j]
-        lines = np.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
-        lines[0,:,:,:] = r0
-        mag = np.sqrt(pixX**2 + pixY**2)
-        scale = np.sqrt(nx*ny) / (self.factor * mag.mean())
-        dt = 1.0 / (self.nsample-1)
-        for i in range(1,self.nsample):
-            xt = lines[i-1,0,:,:]
-            yt = lines[i-1,1,:,:]
-            ix = np.maximum(np.minimum((xt).astype('int'), nx-1), 0)
-            iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
-            lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
-            lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
-        # scale into data units
-        lines[:,0,:,:] = lines[:,0,:,:] * (xx1 - xx0) / nx + xx0
-        lines[:,1,:,:] = lines[:,1,:,:] * (yy1 - yy0) / ny + yy0
-        for i in range(self.data_size[0]):
-            for j in range(self.data_size[1]):
-                plot._axes.plot(lines[:,0,i,j], lines[:,1,i,j],
-                                **self.plot_args)
+                           (x0, x1, y0, y1),).transpose()
+        X,Y = (np.linspace(xx0,xx1,nx,endpoint=True),
+               np.linspace(yy0,yy1,ny,endpoint=True))
+        if self.normalize:
+            nn = np.sqrt(pixX**2 + pixY**2)
+            pixX /= nn
+            pixY /= nn
+        plot._axes.streamplot(X,Y, pixX, pixY, density=self.dens,
+                              arrowsize=self.arrowsize, arrowstyle=self.arrowstyle,
+                              color=self.color, norm=self.normalize)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
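
A sketch of the reworked callbacks in use (the dataset path is hypothetical; annotate_streamlines now delegates to matplotlib's streamplot, and annotate_contour gains label/label_args):

    from yt.mods import load, SlicePlot

    pf = load("DD0010/moving7_0010")   # hypothetical dataset
    p = SlicePlot(pf, "z", "Density")
    # factor coarsens the pixelized velocity field; density sets packing.
    p.annotate_streamlines("x-velocity", "y-velocity",
                           factor=16, density=1, color="#FFFFFF")
    p.annotate_contour("Temperature", ncont=3, label=True,
                       label_args={"fontsize": 8})
    p.save()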


diff -r 814a93ee320766d02a677b07a441cbc3dcc660a5 -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -146,7 +146,8 @@
     @parallel_passthrough
     def _finalize_parallel(self,data):
         self.streamlines = self.comm.mpi_allreduce(self.streamlines, op='sum')
-        self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
+        if self.get_magnitude:
+            self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
         
     def _integrate_through_brick(self, node, stream, step,
                                  periodic=False, mag=None):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/c88a92f30151/
changeset:   c88a92f30151
branch:      yt
user:        scopatz
date:        2012-10-23 21:55:26
summary:     bugfix for contour annotations.
affected #:  1 file

diff -r df3c48e34cc922e10cf3d1ed4b95bc126b70d193 -r c88a92f30151a943cd52feebdcf5485fda5ed3fa yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -294,7 +294,7 @@
             self.clim = (np.log10(self.clim[0]), np.log10(self.clim[1]))
         
         if self.clim is not None: 
-            self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
+            self.ncont = np.linspace(self.clim[0], self.clim[1], self.ncont)
         
         cset = plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
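
For the record, the failure this fixes: inside __call__ the clim branch referenced the bare name ncont, which is not defined in that scope, so any contour annotation with clim set raised NameError. A sketch (dataset path hypothetical):

    from yt.mods import load, SlicePlot

    pf = load("DD0010/moving7_0010")   # hypothetical dataset
    p = SlicePlot(pf, "z", "Density")
    p.annotate_contour("Density", ncont=4, clim=(1e-28, 1e-25))
    p.save()
    # before: NameError: global name 'ncont' is not defined
    # after:  self.ncont becomes 4 evenly spaced levels across clim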



https://bitbucket.org/yt_analysis/yt-3.0/changeset/6f48556cbe09/
changeset:   6f48556cbe09
branch:      yt
user:        scopatz
date:        2012-10-23 23:20:47
summary:     fixed yt/testing.py issue.
affected #:  1 file

diff -r c88a92f30151a943cd52feebdcf5485fda5ed3fa -r 6f48556cbe09a22d1d200de4b36985d660441f18 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -24,10 +24,12 @@
 
 import numpy as np
 from yt.funcs import *
-from numpy.testing import assert_array_equal, assert_equal, assert_almost_equal
+from numpy.testing import assert_array_equal, assert_almost_equal, \
+    assert_approx_equal, assert_array_almost_equal, assert_equal, \
+    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
 
-def assert_rel_equal(a1, a2, decimels):
-    return assert_almost_equal(a1/a2, 1.0, decimels)
+def assert_rel_equal(a1, a2, decimals):
+    return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 
@@ -137,11 +139,16 @@
         ndims = [ndims, ndims, ndims]
     else:
         assert(len(ndims) == 3)
-    if negative:
-        offset = 0.5
-    else:
-        offset = 0.0
+    if not iterable(negative):
+        negative = [negative for f in fields]
+    assert(len(fields) == len(negative))
+    offsets = []
+    for n in negative:
+        if n:
+            offsets.append(0.5)
+        else:
+            offsets.append(0.0)
     data = dict((field, (np.random.random(ndims) - offset) * peak_value)
-                 for field in fields)
+                 for field,offset in zip(fields,offsets))
     ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
     return ug
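
A sketch of what the reworked negative keyword accepts: either a single flag applied to every field, or one flag per field, as the new tests rely on:

    from yt.testing import fake_random_pf

    fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
    # Density stays positive; the velocities are centered on zero.
    pf = fake_random_pf(64, fields=fields,
                        negative=[False, True, True, True])
    dd = pf.h.all_data()
    print dd["x-velocity"].min() < 0 < dd["x-velocity"].max()   # True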



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5f5bfa8f287f/
changeset:   5f5bfa8f287f
branch:      yt
user:        scopatz
date:        2012-10-24 02:52:54
summary:     rm TimeCallback at Nathan's suggestion.
affected #:  1 file

diff -r 6f48556cbe09a22d1d200de4b36985d660441f18 -r 5f5bfa8f287f61321911b9e3ee2727aa692d89a8 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -450,30 +450,6 @@
         plot._axes.set_xlabel(self.label)
         plot._axes.set_ylabel(self.label)
 
-class TimeCallback(PlotCallback):
-    _type_name = "time"
-    def __init__(self, format_code='10.7e'):
-        """
-        This annotates the plot with the current simulation time.
-        For now, the time is displayed in seconds.
-        *format_code* can be optionally set, allowing a custom 
-        c-style format code for the time display.
-        """
-        self.format_code = format_code
-        PlotCallback.__init__(self)
-    
-    def __call__(self, plot):
-        current_time = plot.pf.current_time/plot.pf['Time']
-        timestring = format(current_time,self.format_code)
-        base = timestring[:timestring.find('e')]
-        exponent = timestring[timestring.find('e')+1:]
-        if exponent[0] == '+':
-            exponent = exponent[1:]
-        timestring = r'$t\/=\/'+base+''+r'\times\,10^{'+exponent+r'}\, \rm{s}$'
-        from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
-        at = AnchoredText(timestring, prop=dict(size=12), frameon=True, loc=4)
-        plot._axes.add_artist(at)
-
 def get_smallest_appropriate_unit(v, pf):
     max_nu = 1e30
     good_u = None



https://bitbucket.org/yt_analysis/yt-3.0/changeset/981d0cf55bb1/
changeset:   981d0cf55bb1
branch:      yt
user:        MatthewTurk
date:        2012-10-24 13:45:20
summary:     Merged in scopatz/yt (pull request #313)
affected #:  2 files



diff -r 221b5163bffa2e478a2b65d80090adf1be45bb2e -r 981d0cf55bb18724b63a29decb21fbd8d5d4af67 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -7,6 +7,8 @@
 Affiliation: UC Berkeley
 Author: Stephen Skory <s at skory.us>
 Affiliation: UC San Diego
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: The University of Chicago
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008-2011 Matthew Turk, JS Oishi, Stephen Skory.  All Rights Reserved.
@@ -292,7 +294,7 @@
             self.clim = (np.log10(self.clim[0]), np.log10(self.clim[1]))
         
         if self.clim is not None: 
-            self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
+            self.ncont = np.linspace(self.clim[0], self.clim[1], self.ncont)
         
         cset = plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
@@ -448,30 +450,6 @@
         plot._axes.set_xlabel(self.label)
         plot._axes.set_ylabel(self.label)
 
-class TimeCallback(PlotCallback):
-    _type_name = "time"
-    def __init__(self, format_code='10.7e'):
-        """
-        This annotates the plot with the current simulation time.
-        For now, the time is displayed in seconds.
-        *format_code* can be optionally set, allowing a custom 
-        c-style format code for the time display.
-        """
-        self.format_code = format_code
-        PlotCallback.__init__(self)
-    
-    def __call__(self, plot):
-        current_time = plot.pf.current_time/plot.pf['Time']
-        timestring = format(current_time,self.format_code)
-        base = timestring[:timestring.find('e')]
-        exponent = timestring[timestring.find('e')+1:]
-        if exponent[0] == '+':
-            exponent = exponent[1:]
-        timestring = r'$t\/=\/'+base+''+r'\times\,10^{'+exponent+r'}\, \rm{s}$'
-        from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
-        at = AnchoredText(timestring, prop=dict(size=12), frameon=True, loc=4)
-        plot._axes.add_artist(at)
-
 def get_smallest_appropriate_unit(v, pf):
     max_nu = 1e30
     good_u = None
@@ -1111,3 +1089,152 @@
     def __call__(self,plot):
         plot._axes.set_title(self.title)
 
+class FlashRayDataCallback(PlotCallback):
+    _type_name = "flash_ray_data"
+    def __init__(self, cmap_name='bone', sample=None):
+        """ 
+        annotate_flash_ray_data(cmap_name='bone', sample=None)
+
+        Adds ray trace data to the plot.  *cmap_name* is the name of the color map 
+        ('bone', 'jet', 'hot', etc).  *sample* dictates the amount of downsampling 
+        to do to prevent all of the rays from being plotted.  This may be None 
+        (plot all rays, default), an integer (step size), or a slice object.
+        """
+        self.cmap_name = cmap_name
+        self.sample = sample if isinstance(sample, slice) else slice(None, None, sample)
+
+    def __call__(self, plot):
+        ray_data = plot.data.pf._handle["RayData"][:]
+        idx = ray_data[:,0].argsort(kind="mergesort")
+        ray_data = ray_data[idx]
+
+        tags = ray_data[:,0]
+        coords = ray_data[:,1:3]
+        power = ray_data[:,4]
+        power /= power.max()
+        cx, cy = self.convert_to_plot(plot, coords.T)
+        coords[:,0], coords[:,1] = cx, cy
+        splitidx = np.argwhere(0 < (tags[1:] - tags[:-1])) + 1
+        coords = np.split(coords, splitidx.flat)[self.sample]
+        power = np.split(power, splitidx.flat)[self.sample]
+        cmap = matplotlib.cm.get_cmap(self.cmap_name)
+
+        plot._axes.hold(True)
+        colors = [cmap(p.max()) for p in power]
+        lc = matplotlib.collections.LineCollection(coords, colors=colors)
+        plot._axes.add_collection(lc)
+        plot._axes.hold(False)
+
+
+class TimestampCallback(PlotCallback):
+    _type_name = "timestamp"
+    _time_conv = {
+          'as': 1e-18,
+          'attosec': 1e-18,
+          'attosecond': 1e-18,
+          'attoseconds': 1e-18,
+          'fs': 1e-15,
+          'femtosec': 1e-15,
+          'femtosecond': 1e-15,
+          'femtoseconds': 1e-15,
+          'ps': 1e-12,
+          'picosec': 1e-12,
+          'picosecond': 1e-12,
+          'picoseconds': 1e-12,
+          'ns': 1e-9,
+          'nanosec': 1e-9,
+          'nanosecond':1e-9,
+          'nanoseconds' : 1e-9,
+          'us': 1e-6,
+          'microsec': 1e-6,
+          'microsecond': 1e-6,
+          'microseconds': 1e-6,
+          'ms': 1e-3,
+          'millisec': 1e-3,
+          'millisecond': 1e-3,
+          'milliseconds': 1e-3,
+          's': 1.0,
+          'sec': 1.0,
+          'second':1.0,
+          'seconds': 1.0,
+          'm': 60.0,
+          'min': 60.0,
+          'minute': 60.0,
+          'minutes': 60.0,
+          'h': 3600.0,
+          'hour': 3600.0,
+          'hours': 3600.0,
+          'd': 86400.0,
+          'day': 86400.0,
+          'days': 86400.0,
+          'y': 86400.0*365.25,
+          'year': 86400.0*365.25,
+          'years': 86400.0*365.25,
+          'ev': 1e-9 * 7.6e-8 / 6.03,
+          'kev': 1e-12 * 7.6e-8 / 6.03,
+          'mev': 1e-15 * 7.6e-8 / 6.03,
+          }
+
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", **kwargs):
+        """ 
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
+
+        Adds the current time to the plot at point given by *x* and *y*.  If *units* 
+        is given ('s', 'ms', 'ns', etc), it will convert the time to this basis.  If 
+        *units* is None, it will attempt to figure out the correct value by which to 
+        scale.  The *format* keyword is a template string that will be evaluated and 
+        displayed on the plot.  All other *kwargs* will be passed to the text() 
+        method on the plot axes.  See matplotlib's text() functions for more 
+        information.
+        """
+        self.x = x
+        self.y = y
+        self.format = format
+        self.units = units
+        self.kwargs = {'color': 'w'}
+        self.kwargs.update(kwargs)
+
+    def __call__(self, plot):
+        if self.units is None:
+            t = plot.data.pf.current_time
+            scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']
+            self.units = 's'
+            for k in scale_keys:
+                if t < self._time_conv[k]:
+                    break
+                self.units = k
+        t = plot.data.pf.current_time / self._time_conv[self.units.lower()]
+        if self.units == 'us':
+            self.units = '$\\mu s$'
+        s = self.format.format(time=t, units=self.units)
+        plot._axes.hold(True)
+        plot._axes.text(self.x, self.y, s, **self.kwargs)
+        plot._axes.hold(False)
+
+
+class MaterialBoundaryCallback(ContourCallback):
+    _type_name = "material_boundary"
+    def __init__(self, field='targ', ncont=1, factor=4, clim=(0.9, 1.0), **kwargs):
+        """ 
+        annotate_material_boundary(self, field='targ', ncont=1, factor=4, 
+                                   clim=(0.9, 1.0), **kwargs):
+
+        Add the limiting contours of *field* to the plot.  Nominally, *field* is 
+        the target material but may be any other field present in the hierarchy.
+        The number of contours generated is given by *ncont*, *factor* governs 
+        the number of points used in the interpolation, and *clim* gives the 
+        (lower, upper) limits for contouring.  For this to truly be the boundary
+        *clim* should be close to the edge.  For example the default is (0.9, 1.0)
+        for 'targ' which is defined on the range [0.0, 1.0].  All other *kwargs* 
+        will be passed to the contour() method on the plot axes.  See matplotlib
+        for more information.
+        """
+        plot_args = {'colors': 'w'}
+        plot_args.update(kwargs)
+        super(MaterialBoundaryCallback, self).__init__(field=field, ncont=ncont,
+                                                       factor=factor, clim=clim,
+                                                       plot_args=plot_args)
+
+    def __call__(self, plot):
+        super(MaterialBoundaryCallback, self).__call__(plot)
+



https://bitbucket.org/yt_analysis/yt-3.0/changeset/4e9c8d88540e/
changeset:   4e9c8d88540e
branch:      yt
user:        sskory
date:        2012-10-24 16:56:30
summary:     Modifying allsky projection to accept a data source.
affected #:  1 file

diff -r 329544ece9b19b441f2491e7dad059c848e9e318 -r 4e9c8d88540ec98458770db7686938b0efd9e504 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1449,7 +1449,7 @@
             yield self.snapshot()
 
 def allsky_projection(pf, center, radius, nside, field, weight = None,
-                      inner_radius = 10, rotation = None):
+                      inner_radius = 10, rotation = None, source = None):
     r"""Project through a parameter file, through an allsky-method
     decomposition from HEALpix, and return the image plane.
 
@@ -1484,6 +1484,9 @@
         If supplied, the vectors will be rotated by this.  You can construct
         this by, for instance, calling np.array([v1,v2,v3]) where those are the
         three reference planes of an orthogonal frame (see ortho_find).
+    source : data container, default None
+        If this is supplied, it gives the data source from which the all sky
+        projection pulls its data.
 
     Returns
     -------
@@ -1527,12 +1530,20 @@
     positions += inner_radius * dx * vs
     vs *= radius
     uv = np.ones(3, dtype='float64')
-    grids = pf.h.sphere(center, radius)._grids
+    if source is not None:
+        grids = source._grids
+    else:
+        grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
                                 image, uv, uv, np.zeros(3, dtype='float64'))
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
-        data = [grid[field] * grid.child_mask.astype('float64')
+        if source is not None:
+            data = [grid[field] * source._get_cut_mask(grid) * \
+                grid.child_mask.astype('float64')
+                for field in fields]
+        else:
+            data = [grid[field] * grid.child_mask.astype('float64')
                 for field in fields]
         pg = PartitionedGrid(
             grid.id, data,
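
A sketch of the new source keyword (dataset path hypothetical): restrict the all-sky projection to a hand-picked container instead of the default sphere of the given radius:

    from yt.mods import load
    from yt.visualization.volume_rendering.camera import allsky_projection

    pf = load("DD0010/moving7_0010")   # hypothetical dataset
    box = pf.h.region([0.5]*3, [0.25]*3, [0.75]*3)
    image = allsky_projection(pf, [0.5]*3, 0.25, 64, "Density",
                              source=box)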



https://bitbucket.org/yt_analysis/yt-3.0/changeset/4c9fee8d49ac/
changeset:   4c9fee8d49ac
branch:      yt
user:        MatthewTurk
date:        2012-10-24 17:03:41
summary:     Merged in sskory/yt (pull request #314)
affected #:  1 file

diff -r 981d0cf55bb18724b63a29decb21fbd8d5d4af67 -r 4c9fee8d49acfebfa2756f286d0352e85a4f6de2 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1449,7 +1449,7 @@
             yield self.snapshot()
 
 def allsky_projection(pf, center, radius, nside, field, weight = None,
-                      inner_radius = 10, rotation = None):
+                      inner_radius = 10, rotation = None, source = None):
     r"""Project through a parameter file, through an allsky-method
     decomposition from HEALpix, and return the image plane.
 
@@ -1484,6 +1484,9 @@
         If supplied, the vectors will be rotated by this.  You can construct
         this by, for instance, calling np.array([v1,v2,v3]) where those are the
         three reference planes of an orthogonal frame (see ortho_find).
+    source : data container, default None
+        If this is supplied, it gives the data source from which the all sky
+        projection pulls its data.
 
     Returns
     -------
@@ -1527,12 +1530,20 @@
     positions += inner_radius * dx * vs
     vs *= radius
     uv = np.ones(3, dtype='float64')
-    grids = pf.h.sphere(center, radius)._grids
+    if source is not None:
+        grids = source._grids
+    else:
+        grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
                                 image, uv, uv, np.zeros(3, dtype='float64'))
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
-        data = [grid[field] * grid.child_mask.astype('float64')
+        if source is not None:
+            data = [grid[field] * source._get_cut_mask(grid) * \
+                grid.child_mask.astype('float64')
+                for field in fields]
+        else:
+            data = [grid[field] * grid.child_mask.astype('float64')
                 for field in fields]
         pg = PartitionedGrid(
             grid.id, data,



https://bitbucket.org/yt_analysis/yt-3.0/changeset/b6ec5694fc75/
changeset:   b6ec5694fc75
branch:      yt
user:        sskory
date:        2012-10-19 22:03:30
summary:     Fixed a small error with boolean objects.
affected #:  1 file

diff -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 -r b6ec5694fc75f05406c68448adf155b523112f36 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -71,7 +71,7 @@
 def force_array(item, shape):
     try:
         sh = item.shape
-        return item
+        return item.copy()
     except AttributeError:
         if item:
             return np.ones(shape, dtype='bool')
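
The one-character change matters because the returned mask can be modified in place by downstream boolean logic; a sketch of the aliasing it closes (force_array imported from the module patched above):

    import numpy as np
    from yt.data_objects.data_containers import force_array

    mask = np.ones((4, 4, 4), dtype='bool')
    forced = force_array(mask, mask.shape)
    forced &= False    # in-place edit by a downstream consumer
    print mask.any()   # True with the fix; the old alias made this False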



https://bitbucket.org/yt_analysis/yt-3.0/changeset/b18f2082144d/
changeset:   b18f2082144d
branch:      yt
user:        sskory
date:        2012-10-20 00:50:02
summary:     Merge.
affected #:  1 file

diff -r b6ec5694fc75f05406c68448adf155b523112f36 -r b18f2082144d476ce06a6d03af5f9ea2b9ff2124 yt/data_objects/tests/test_extract_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -0,0 +1,53 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_cut_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        r = dd.cut_region( [ "grid['Temperature'] > 0.5",
+                             "grid['Density'] < 0.75",
+                             "grid['x-velocity'] > 0.25" ])
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        t2 = (r["Temperature"] < 0.75)
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+
+def test_extract_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        r = dd.extract_region(t)
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        t2 = (r["Temperature"] < 0.75)
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+        t3 = (r["Temperature"] < 0.75)
+        r3 = r.extract_region( t3 )
+        yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
+        yield assert_equal, np.all(r3["Temperature"] < 0.75), True



https://bitbucket.org/yt_analysis/yt-3.0/changeset/52637cee578f/
changeset:   52637cee578f
branch:      yt
user:        sskory
date:        2012-10-23 17:39:01
summary:     merge
affected #:  3 files

diff -r b18f2082144d476ce06a6d03af5f9ea2b9ff2124 -r 52637cee578ff0f290c12331eaf1220e74467d83 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -258,8 +258,11 @@
 
         """
         if isinstance(filenames, types.StringTypes):
+            pattern = filenames
             filenames = glob.glob(filenames)
             filenames.sort()
+            if len(filenames) == 0:
+                raise YTNoFilenamesMatchPattern(pattern)
         obj = cls(filenames[:], parallel = parallel)
         return obj
 


diff -r b18f2082144d476ce06a6d03af5f9ea2b9ff2124 -r 52637cee578ff0f290c12331eaf1220e74467d83 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -146,3 +146,11 @@
     def __str__(self):
         return "You must create an API key before uploading.  See " + \
                "https://data.yt-project.org/getting_started.html"
+
+class YTNoFilenamesMatchPattern(YTException):
+    def __init__(self, pattern):
+        self.pattern = pattern
+
+    def __str__(self):
+        return "No filenames were found to match the pattern: " + \
+               "'%s'" % (self.pattern)


diff -r b18f2082144d476ce06a6d03af5f9ea2b9ff2124 -r 52637cee578ff0f290c12331eaf1220e74467d83 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -211,7 +211,7 @@
 class ContourCallback(PlotCallback):
     _type_name = "contour"
     def __init__(self, field, ncont=5, factor=4, clim=None,
-                 plot_args = None):
+                 plot_args = None, label = False, label_args = None):
         """
         annotate_contour(self, field, ncont=5, factor=4, take_log=False, clim=None,
                          plot_args = None):
@@ -230,6 +230,10 @@
         self.clim = clim
         if plot_args is None: plot_args = {'colors':'k'}
         self.plot_args = plot_args
+        self.label = label
+        if label_args is None:
+            label_args = {}
+        self.label_args = label_args
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -290,10 +294,14 @@
         if self.clim is not None: 
             self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
         
-        plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
+        cset = plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
+        
+        if self.label:
+            plot._axes.clabel(cset, **self.label_args)
+        
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"



https://bitbucket.org/yt_analysis/yt-3.0/changeset/97aea7f156df/
changeset:   97aea7f156df
branch:      yt
user:        sskory
date:        2012-10-23 17:39:19
summary:     Adding tests for boolean objects.
affected #:  1 file

diff -r 52637cee578ff0f290c12331eaf1220e74467d83 -r 97aea7f156dfbab8080028fed481f49b8480669c yt/data_objects/tests/test_boolean_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -0,0 +1,348 @@
+from yt.testing import *
+from yt.mods import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    def _ID(field, data):
+        width = data.pf.domain_right_edge - data.pf.domain_left_edge
+        delta = width / data.pf.h.get_smallest_dx()
+        x = data['x'] - data.pf.h.get_smallest_dx() / 2.
+        y = data['y'] - data.pf.h.get_smallest_dx() / 2.
+        z = data['z'] - data.pf.h.get_smallest_dx() / 2.
+        xi = x / data.pf.h.get_smallest_dx()
+        yi = y / data.pf.h.get_smallest_dx()
+        zi = z / data.pf.h.get_smallest_dx()
+        index = xi + delta[0] * (yi + delta[1] * zi)
+        index = index.astype('int64')
+        return index
+
+    add_field("ID", function=_ID)
+
+def test_boolean_spheres_no_overlap():
+    r"""Test to make sure that boolean objects (spheres, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping spheres. This also checks that the original spheres
+    don't change as part of constructing the booleans.
+    """
+    pf = fake_random_pf(64)
+    pf.h
+    sp1 = pf.h.sphere([0.25, 0.25, 0.25], 0.15)
+    sp2 = pf.h.sphere([0.75, 0.75, 0.75], 0.15)
+    # Store the original indices
+    i1 = sp1['ID']
+    i1.sort()
+    i2 = sp2['ID']
+    i2.sort()
+    ii = np.concatenate((i1, i2))
+    ii.sort()
+    # Make some booleans
+    bo1 = pf.h.boolean([sp1, "AND", sp2]) # empty
+    bo2 = pf.h.boolean([sp1, "NOT", sp2]) # only sp1
+    bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination
+    # This makes sure the original containers didn't change.
+    new_i1 = sp1['ID']
+    new_i1.sort()
+    new_i2 = sp2['ID']
+    new_i2.sort()
+    assert_equal(new_i1, i1)
+    assert_equal(new_i2, i2)
+    # Now make sure the indices also behave as we expect.
+    empty = np.array([])
+    assert_array_equal(bo1['ID'], empty)
+    b2 = bo2['ID']
+    b2.sort()
+    assert_array_equal(b2, i1)
+    b3 = bo3['ID']
+    b3.sort()
+    assert_array_equal(b3, ii)
+ 
+def test_boolean_spheres_overlap():
+    r"""Test to make sure that boolean objects (spheres, overlap)
+    behave the way we expect.
+
+    Test overlapping spheres.
+    """
+    pf = fake_random_pf(64)
+    pf.h
+    sp1 = pf.h.sphere([0.45, 0.45, 0.45], 0.15)
+    sp2 = pf.h.sphere([0.55, 0.55, 0.55], 0.15)
+    # Get indices of both.
+    i1 = sp1['ID']
+    i2 = sp2['ID']
+    # Make some booleans
+    bo1 = pf.h.boolean([sp1, "AND", sp2]) # overlap (a lens)
+    bo2 = pf.h.boolean([sp1, "NOT", sp2]) # sp1 - sp2 (sphere with bite)
+    bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination (H2)
+    # Now make sure the indices also behave as we expect.
+    lens = np.intersect1d(i1, i2)
+    apple = np.setdiff1d(i1, i2)
+    both = np.union1d(i1, i2)
+    b1 = bo1['ID']
+    b1.sort()
+    b2 = bo2['ID']
+    b2.sort()
+    b3 = bo3['ID']
+    b3.sort()
+    assert_array_equal(b1, lens)
+    assert_array_equal(b2, apple)
+    assert_array_equal(b3, both)
+
+def test_boolean_regions_no_overlap():
+    r"""Test to make sure that boolean objects (regions, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping regions. This also checks that the original regions
+    don't change as part of constructing the booleans.
+    """
+    pf = fake_random_pf(64)
+    pf.h
+    re1 = pf.h.region([0.25]*3, [0.2]*3, [0.3]*3)
+    re2 = pf.h.region([0.65]*3, [0.6]*3, [0.7]*3)
+    # Store the original indices
+    i1 = re1['ID']
+    i1.sort()
+    i2 = re2['ID']
+    i2.sort()
+    ii = np.concatenate((i1, i2))
+    ii.sort()
+    # Make some booleans
+    bo1 = pf.h.boolean([re1, "AND", re2]) # empty
+    bo2 = pf.h.boolean([re1, "NOT", re2]) # only re1
+    bo3 = pf.h.boolean([re1, "OR", re2]) # combination
+    # This makes sure the original containers didn't change.
+    new_i1 = re1['ID']
+    new_i1.sort()
+    new_i2 = re2['ID']
+    new_i2.sort()
+    assert_equal(new_i1, i1)
+    assert_equal(new_i2, i2)
+    # Now make sure the indices also behave as we expect.
+    empty = np.array([])
+    assert_array_equal(bo1['ID'], empty)
+    b2 = bo2['ID']
+    b2.sort()
+    assert_array_equal(b2, i1)
+    b3 = bo3['ID']
+    b3.sort()
+    assert_array_equal(b3, ii)
+
+def test_boolean_regions_overlap():
+    r"""Test to make sure that boolean objects (regions, overlap)
+    behave the way we expect.
+
+    Test overlapping regions.
+    """
+    pf = fake_random_pf(64)
+    pf.h
+    re1 = pf.h.region([0.55]*3, [0.5]*3, [0.6]*3)
+    re2 = pf.h.region([0.6]*3, [0.55]*3, [0.65]*3)
+    # Get indices of both.
+    i1 = re1['ID']
+    i2 = re2['ID']
+    # Make some booleans
+    bo1 = pf.h.boolean([re1, "AND", re2]) # overlap (small cube)
+    bo2 = pf.h.boolean([re1, "NOT", re2]) # re1 - re2 (large cube with bite)
+    bo3 = pf.h.boolean([re1, "OR", re2]) # combination (merged large cubes)
+    # Now make sure the indices also behave as we expect.
+    cube = np.intersect1d(i1, i2)
+    bite_cube = np.setdiff1d(i1, i2)
+    both = np.union1d(i1, i2)
+    b1 = bo1['ID']
+    b1.sort()
+    b2 = bo2['ID']
+    b2.sort()
+    b3 = bo3['ID']
+    b3.sort()
+    assert_array_equal(b1, cube)
+    assert_array_equal(b2, bite_cube)
+    assert_array_equal(b3, both)
+
+def test_boolean_cylinders_no_overlap():
+    r"""Test to make sure that boolean objects (cylinders, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping cylinders. This also checks that the original cylinders
+    don't change as part of constructing the booleans.
+    """
+    pf = fake_random_pf(64)
+    pf.h
+    cyl1 = pf.h.disk([0.25]*3, [1, 0, 0], 0.1, 0.1)
+    cyl2 = pf.h.disk([0.75]*3, [1, 0, 0], 0.1, 0.1)
+    # Store the original indices
+    i1 = cyl1['ID']
+    i1.sort()
+    i2 = cyl2['ID']
+    i2.sort()
+    ii = np.concatenate((i1, i2))
+    ii.sort()
+    # Make some booleans
+    bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # empty
+    bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # only cyl1
+    bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination
+    # This makes sure the original containers didn't change.
+    new_i1 = cyl1['ID']
+    new_i1.sort()
+    new_i2 = cyl2['ID']
+    new_i2.sort()
+    assert_equal(new_i1, i1)
+    assert_equal(new_i2, i2)
+    # Now make sure the indices also behave as we expect.
+    empty = np.array([])
+    assert_array_equal(bo1['ID'], empty)
+    b2 = bo2['ID']
+    b2.sort()
+    assert_array_equal(b2, i1)
+    b3 = bo3['ID']
+    b3.sort()
+    assert_array_equal(b3, ii)
+
+def test_boolean_cylinders_overlap():
+    r"""Test to make sure that boolean objects (cylinders, overlap)
+    behave the way we expect.
+
+    Test overlapping cylinders.
+    """
+    pf = fake_random_pf(64)
+    pf.h
+    cyl1 = pf.h.disk([0.45]*3, [1, 0, 0], 0.2, 0.2)
+    cyl2 = pf.h.disk([0.55]*3, [1, 0, 0], 0.2, 0.2)
+    # Get indices of both.
+    i1 = cyl1['ID']
+    i2 = cyl2['ID']
+    # Make some booleans
+    bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # overlap (vertically extended lens)
+    bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # cyl1 - cyl2 (disk minus a bite)
+    bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination (merged disks)
+    # Now make sure the indices also behave as we expect.
+    vlens = np.intersect1d(i1, i2)
+    bite_disk = np.setdiff1d(i1, i2)
+    both = np.union1d(i1, i2)
+    b1 = bo1['ID']
+    b1.sort()
+    b2 = bo2['ID']
+    b2.sort()
+    b3 = bo3['ID']
+    b3.sort()
+    assert_array_equal(b1, vlens)
+    assert_array_equal(b2, bite_disk)
+    assert_array_equal(b3, both)
+
+def test_boolean_ellipsoids_no_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping ellipsoids. This also checks that the original
+    ellipsoids don't change as part of constructing the booleans.
+    """
+    pf = fake_random_pf(64)
+    pf.h
+    ell1 = pf.h.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+        np.array([0.1]*3))
+    ell2 = pf.h.ellipsoid([0.75]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+        np.array([0.1]*3))
+    # Store the original indices
+    i1 = ell1['ID']
+    i1.sort()
+    i2 = ell2['ID']
+    i2.sort()
+    ii = np.concatenate((i1, i2))
+    ii.sort()
+    # Make some booleans
+    bo1 = pf.h.boolean([ell1, "AND", ell2]) # empty
+    bo2 = pf.h.boolean([ell1, "NOT", ell2]) # only ell1
+    bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+    # This makes sure the original containers didn't change.
+    new_i1 = ell1['ID']
+    new_i1.sort()
+    new_i2 = ell2['ID']
+    new_i2.sort()
+    assert_equal(new_i1, i1)
+    assert_equal(new_i2, i2)
+    # Now make sure the indices also behave as we expect.
+    empty = np.array([])
+    assert_array_equal(bo1['ID'], empty)
+    b2 = bo2['ID']
+    b2.sort()
+    assert_array_equal(b2, i1)
+    b3 = bo3['ID']
+    b3.sort()
+    assert_array_equal(b3, ii)
+
+def test_boolean_ellipsoids_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, overlap)
+    behave the way we expect.
+
+    Test overlapping ellipsoids.
+    """
+    pf = fake_random_pf(64)
+    pf.h
+    ell1 = pf.h.ellipsoid([0.45]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+        np.array([0.1]*3))
+    ell2 = pf.h.ellipsoid([0.55]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+        np.array([0.1]*3))
+    # Get indices of both.
+    i1 = ell1['ID']
+    i2 = ell2['ID']
+    # Make some booleans
+    bo1 = pf.h.boolean([ell1, "AND", ell2]) # overlap
+    bo2 = pf.h.boolean([ell1, "NOT", ell2]) # ell1 - ell2
+    bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+    # Now make sure the indices also behave as we expect.
+    overlap = np.intersect1d(i1, i2)
+    diff = np.setdiff1d(i1, i2)
+    both = np.union1d(i1, i2)
+    b1 = bo1['ID']
+    b1.sort()
+    b2 = bo2['ID']
+    b2.sort()
+    b3 = bo3['ID']
+    b3.sort()
+    assert_array_equal(b1, overlap)
+    assert_array_equal(b2, diff)
+    assert_array_equal(b3, both)
+
+def test_boolean_mix_periodicity():
+    r"""Test that a hybrid boolean region behaves as we expect.
+
+    This also tests nested logic and that periodicity works.
+    """
+    pf = fake_random_pf(64)
+    pf.h
+    re = pf.h.region([0.5]*3, [0.0]*3, [1]*3) # whole thing
+    sp = pf.h.sphere([0.95]*3, 0.3) # wraps around
+    cyl = pf.h.disk([0.05]*3, [1,1,1], 0.1, 0.4) # wraps around
+    ell = pf.h.ellipsoid([0.35]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+        np.array([0.1]*3)) # no wrap
+    # Get original indices
+    rei = re['ID']
+    spi = sp['ID']
+    cyli = cyl['ID']
+    elli = ell['ID']
+    # Make some booleans
+    # whole box minus spherical bites at corners
+    bo1 = pf.h.boolean([re, "NOT", sp])
+    # sphere plus cylinder
+    bo2 = pf.h.boolean([sp, "OR", cyl])
+    # a big jumble, the region minus the ell+cyl (scepter shaped?), plus the
+    # sphere which should add back some of what the ell+cyl took out.
+    bo3 = pf.h.boolean([re, "NOT", "(", ell, "OR", cyl, ")", "OR", sp])
+    # Now make sure the indices also behave as we expect.
+    expect = np.setdiff1d(rei, spi)
+    ii = bo1['ID']
+    ii.sort()
+    assert_array_equal(expect, ii)
+    #
+    expect = np.union1d(spi, cyli)
+    ii = bo2['ID']
+    ii.sort()
+    assert_array_equal(expect, ii)
+    #
+    expect = np.union1d(elli, cyli)
+    expect = np.setdiff1d(rei, expect)
+    expect = np.union1d(expect, spi)
+    ii = bo3['ID']
+    ii.sort()
+    assert_array_equal(expect, ii)
+

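For readers skimming the diffs, a minimal sketch of the boolean-object grammar these tests exercise; fake_random_pf is the same helper the tests themselves use, and the shapes below are arbitrary:

    from yt.testing import fake_random_pf

    pf = fake_random_pf(64)
    pf.h  # instantiate the hierarchy
    sp = pf.h.sphere([0.5]*3, 0.2)
    re = pf.h.region([0.5]*3, [0.4]*3, [0.6]*3)
    inter = pf.h.boolean([sp, "AND", re])  # cells in both containers
    minus = pf.h.boolean([sp, "NOT", re])  # cells in sp but not in re
    union = pf.h.boolean([sp, "OR", re])   # cells in either container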


https://bitbucket.org/yt_analysis/yt-3.0/changeset/88d1492cc7e4/
changeset:   88d1492cc7e4
branch:      yt
user:        sskory
date:        2012-10-23 17:50:32
summary:     Fixing import.
affected #:  1 file

diff -r 97aea7f156dfbab8080028fed481f49b8480669c -r 88d1492cc7e418e26b95a69dab03f9489918b167 yt/data_objects/tests/test_boolean_regions.py
--- a/yt/data_objects/tests/test_boolean_regions.py
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -1,5 +1,5 @@
 from yt.testing import *
-from yt.mods import *
+from yt.data_objects.api import add_field
 
 def setup():
     from yt.config import ytcfg



https://bitbucket.org/yt_analysis/yt-3.0/changeset/dfaaede9f097/
changeset:   dfaaede9f097
branch:      yt
user:        sskory
date:        2012-10-23 18:10:59
summary:     Added nprocs and yield to boolean tests.
affected #:  1 file

diff -r 88d1492cc7e418e26b95a69dab03f9489918b167 -r dfaaede9f09714cd4a2caa51267fc5cbc946fe7e yt/data_objects/tests/test_boolean_regions.py
--- a/yt/data_objects/tests/test_boolean_regions.py
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -26,37 +26,38 @@
     Test non-overlapping spheres. This also checks that the original spheres
     don't change as part of constructing the booleans.
     """
-    pf = fake_random_pf(64)
-    pf.h
-    sp1 = pf.h.sphere([0.25, 0.25, 0.25], 0.15)
-    sp2 = pf.h.sphere([0.75, 0.75, 0.75], 0.15)
-    # Store the original indices
-    i1 = sp1['ID']
-    i1.sort()
-    i2 = sp2['ID']
-    i2.sort()
-    ii = np.concatenate((i1, i2))
-    ii.sort()
-    # Make some booleans
-    bo1 = pf.h.boolean([sp1, "AND", sp2]) # empty
-    bo2 = pf.h.boolean([sp1, "NOT", sp2]) # only sp1
-    bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination
-    # This makes sure the original containers didn't change.
-    new_i1 = sp1['ID']
-    new_i1.sort()
-    new_i2 = sp2['ID']
-    new_i2.sort()
-    assert_equal(new_i1, i1)
-    assert_equal(new_i2, i2)
-    # Now make sure the indices also behave as we expect.
-    empty = np.array([])
-    assert_array_equal(bo1['ID'], empty)
-    b2 = bo2['ID']
-    b2.sort()
-    assert_array_equal(b2, i1)
-    b3 = bo3['ID']
-    b3.sort()
-    assert_array_equal(b3, ii)
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        sp1 = pf.h.sphere([0.25, 0.25, 0.25], 0.15)
+        sp2 = pf.h.sphere([0.75, 0.75, 0.75], 0.15)
+        # Store the original indices
+        i1 = sp1['ID']
+        i1.sort()
+        i2 = sp2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([sp1, "AND", sp2]) # empty
+        bo2 = pf.h.boolean([sp1, "NOT", sp2]) # only sp1
+        bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = sp1['ID']
+        new_i1.sort()
+        new_i2 = sp2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
  
 def test_boolean_spheres_overlap():
     r"""Test to make sure that boolean objects (spheres, overlap)
@@ -64,30 +65,31 @@
 
     Test overlapping spheres.
     """
-    pf = fake_random_pf(64)
-    pf.h
-    sp1 = pf.h.sphere([0.45, 0.45, 0.45], 0.15)
-    sp2 = pf.h.sphere([0.55, 0.55, 0.55], 0.15)
-    # Get indices of both.
-    i1 = sp1['ID']
-    i2 = sp2['ID']
-    # Make some booleans
-    bo1 = pf.h.boolean([sp1, "AND", sp2]) # overlap (a lens)
-    bo2 = pf.h.boolean([sp1, "NOT", sp2]) # sp1 - sp2 (sphere with bite)
-    bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination (H2)
-    # Now make sure the indices also behave as we expect.
-    lens = np.intersect1d(i1, i2)
-    apple = np.setdiff1d(i1, i2)
-    both = np.union1d(i1, i2)
-    b1 = bo1['ID']
-    b1.sort()
-    b2 = bo2['ID']
-    b2.sort()
-    b3 = bo3['ID']
-    b3.sort()
-    assert_array_equal(b1, lens)
-    assert_array_equal(b2, apple)
-    assert_array_equal(b3, both)
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        sp1 = pf.h.sphere([0.45, 0.45, 0.45], 0.15)
+        sp2 = pf.h.sphere([0.55, 0.55, 0.55], 0.15)
+        # Get indices of both.
+        i1 = sp1['ID']
+        i2 = sp2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([sp1, "AND", sp2]) # overlap (a lens)
+        bo2 = pf.h.boolean([sp1, "NOT", sp2]) # sp1 - sp2 (sphere with bite)
+        bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination (H2)
+        # Now make sure the indices also behave as we expect.
+        lens = np.intersect1d(i1, i2)
+        apple = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, lens
+        yield assert_array_equal, b2, apple
+        yield assert_array_equal, b3, both
 
 def test_boolean_regions_no_overlap():
     r"""Test to make sure that boolean objects (regions, no overlap)
@@ -96,37 +98,38 @@
     Test non-overlapping regions. This also checks that the original regions
     don't change as part of constructing the booleans.
     """
-    pf = fake_random_pf(64)
-    pf.h
-    re1 = pf.h.region([0.25]*3, [0.2]*3, [0.3]*3)
-    re2 = pf.h.region([0.65]*3, [0.6]*3, [0.7]*3)
-    # Store the original indices
-    i1 = re1['ID']
-    i1.sort()
-    i2 = re2['ID']
-    i2.sort()
-    ii = np.concatenate((i1, i2))
-    ii.sort()
-    # Make some booleans
-    bo1 = pf.h.boolean([re1, "AND", re2]) # empty
-    bo2 = pf.h.boolean([re1, "NOT", re2]) # only re1
-    bo3 = pf.h.boolean([re1, "OR", re2]) # combination
-    # This makes sure the original containers didn't change.
-    new_i1 = re1['ID']
-    new_i1.sort()
-    new_i2 = re2['ID']
-    new_i2.sort()
-    assert_equal(new_i1, i1)
-    assert_equal(new_i2, i2)
-    # Now make sure the indices also behave as we expect.
-    empty = np.array([])
-    assert_array_equal(bo1['ID'], empty)
-    b2 = bo2['ID']
-    b2.sort()
-    assert_array_equal(b2, i1)
-    b3 = bo3['ID']
-    b3.sort()
-    assert_array_equal(b3, ii)
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re1 = pf.h.region([0.25]*3, [0.2]*3, [0.3]*3)
+        re2 = pf.h.region([0.65]*3, [0.6]*3, [0.7]*3)
+        # Store the original indices
+        i1 = re1['ID']
+        i1.sort()
+        i2 = re2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([re1, "AND", re2]) # empty
+        bo2 = pf.h.boolean([re1, "NOT", re2]) # only re1
+        bo3 = pf.h.boolean([re1, "OR", re2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = re1['ID']
+        new_i1.sort()
+        new_i2 = re2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1 
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
 
 def test_boolean_regions_overlap():
     r"""Test to make sure that boolean objects (regions, overlap)
@@ -134,30 +137,31 @@
 
     Test overlapping regions.
     """
-    pf = fake_random_pf(64)
-    pf.h
-    re1 = pf.h.region([0.55]*3, [0.5]*3, [0.6]*3)
-    re2 = pf.h.region([0.6]*3, [0.55]*3, [0.65]*3)
-    # Get indices of both.
-    i1 = re1['ID']
-    i2 = re2['ID']
-    # Make some booleans
-    bo1 = pf.h.boolean([re1, "AND", re2]) # overlap (small cube)
-    bo2 = pf.h.boolean([re1, "NOT", re2]) # re1 - re2 (large cube with bite)
-    bo3 = pf.h.boolean([re1, "OR", re2]) # combination (merged large cubes)
-    # Now make sure the indices also behave as we expect.
-    cube = np.intersect1d(i1, i2)
-    bite_cube = np.setdiff1d(i1, i2)
-    both = np.union1d(i1, i2)
-    b1 = bo1['ID']
-    b1.sort()
-    b2 = bo2['ID']
-    b2.sort()
-    b3 = bo3['ID']
-    b3.sort()
-    assert_array_equal(b1, cube)
-    assert_array_equal(b2, bite_cube)
-    assert_array_equal(b3, both)
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re1 = pf.h.region([0.55]*3, [0.5]*3, [0.6]*3)
+        re2 = pf.h.region([0.6]*3, [0.55]*3, [0.65]*3)
+        # Get indices of both.
+        i1 = re1['ID']
+        i2 = re2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([re1, "AND", re2]) # overlap (small cube)
+        bo2 = pf.h.boolean([re1, "NOT", re2]) # re1 - re2 (large cube with bite)
+        bo3 = pf.h.boolean([re1, "OR", re2]) # combination (merged large cubes)
+        # Now make sure the indices also behave as we expect.
+        cube = np.intersect1d(i1, i2)
+        bite_cube = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, cube
+        yield assert_array_equal, b2, bite_cube
+        yield assert_array_equal, b3, both
 
 def test_boolean_cylinders_no_overlap():
     r"""Test to make sure that boolean objects (cylinders, no overlap)
@@ -166,37 +170,38 @@
     Test non-overlapping cylinders. This also checks that the original cylinders
     don't change as part of constructing the booleans.
     """
-    pf = fake_random_pf(64)
-    pf.h
-    cyl1 = pf.h.disk([0.25]*3, [1, 0, 0], 0.1, 0.1)
-    cyl2 = pf.h.disk([0.75]*3, [1, 0, 0], 0.1, 0.1)
-    # Store the original indices
-    i1 = cyl1['ID']
-    i1.sort()
-    i2 = cyl2['ID']
-    i2.sort()
-    ii = np.concatenate((i1, i2))
-    ii.sort()
-    # Make some booleans
-    bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # empty
-    bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # only cyl1
-    bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination
-    # This makes sure the original containers didn't change.
-    new_i1 = cyl1['ID']
-    new_i1.sort()
-    new_i2 = cyl2['ID']
-    new_i2.sort()
-    assert_equal(new_i1, i1)
-    assert_equal(new_i2, i2)
-    # Now make sure the indices also behave as we expect.
-    empty = np.array([])
-    assert_array_equal(bo1['ID'], empty)
-    b2 = bo2['ID']
-    b2.sort()
-    assert_array_equal(b2, i1)
-    b3 = bo3['ID']
-    b3.sort()
-    assert_array_equal(b3, ii)
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        cyl1 = pf.h.disk([0.25]*3, [1, 0, 0], 0.1, 0.1)
+        cyl2 = pf.h.disk([0.75]*3, [1, 0, 0], 0.1, 0.1)
+        # Store the original indices
+        i1 = cyl1['ID']
+        i1.sort()
+        i2 = cyl2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # empty
+        bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # only cyl1
+        bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = cyl1['ID']
+        new_i1.sort()
+        new_i2 = cyl2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
 
 def test_boolean_cylinders_overlap():
     r"""Test to make sure that boolean objects (cylinders, overlap)
@@ -204,30 +209,31 @@
 
     Test overlapping cylinders.
     """
-    pf = fake_random_pf(64)
-    pf.h
-    cyl1 = pf.h.disk([0.45]*3, [1, 0, 0], 0.2, 0.2)
-    cyl2 = pf.h.disk([0.55]*3, [1, 0, 0], 0.2, 0.2)
-    # Get indices of both.
-    i1 = cyl1['ID']
-    i2 = cyl2['ID']
-    # Make some booleans
-    bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # overlap (vertically extended lens)
-    bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # cyl1 - cyl2 (disk minus a bite)
-    bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination (merged disks)
-    # Now make sure the indices also behave as we expect.
-    vlens = np.intersect1d(i1, i2)
-    bite_disk = np.setdiff1d(i1, i2)
-    both = np.union1d(i1, i2)
-    b1 = bo1['ID']
-    b1.sort()
-    b2 = bo2['ID']
-    b2.sort()
-    b3 = bo3['ID']
-    b3.sort()
-    assert_array_equal(b1, vlens)
-    assert_array_equal(b2, bite_disk)
-    assert_array_equal(b3, both)
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        cyl1 = pf.h.disk([0.45]*3, [1, 0, 0], 0.2, 0.2)
+        cyl2 = pf.h.disk([0.55]*3, [1, 0, 0], 0.2, 0.2)
+        # Get indices of both.
+        i1 = cyl1['ID']
+        i2 = cyl2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # overlap (vertically extended lens)
+        bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # cyl1 - cyl2 (disk minus a bite)
+        bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination (merged disks)
+        # Now make sure the indices also behave as we expect.
+        vlens = np.intersect1d(i1, i2)
+        bite_disk = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, vlens
+        yield assert_array_equal, b2, bite_disk
+        yield assert_array_equal, b3, both
 
 def test_boolean_ellipsoids_no_overlap():
     r"""Test to make sure that boolean objects (ellipsoids, no overlap)
@@ -236,39 +242,40 @@
     Test non-overlapping ellipsoids. This also checks that the original
     ellipsoids don't change as part of constructing the booleans.
     """
-    pf = fake_random_pf(64)
-    pf.h
-    ell1 = pf.h.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
-        np.array([0.1]*3))
-    ell2 = pf.h.ellipsoid([0.75]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
-        np.array([0.1]*3))
-    # Store the original indices
-    i1 = ell1['ID']
-    i1.sort()
-    i2 = ell2['ID']
-    i2.sort()
-    ii = np.concatenate((i1, i2))
-    ii.sort()
-    # Make some booleans
-    bo1 = pf.h.boolean([ell1, "AND", ell2]) # empty
-    bo2 = pf.h.boolean([ell1, "NOT", ell2]) # only ell1
-    bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
-    # This makes sure the original containers didn't change.
-    new_i1 = ell1['ID']
-    new_i1.sort()
-    new_i2 = ell2['ID']
-    new_i2.sort()
-    assert_equal(new_i1, i1)
-    assert_equal(new_i2, i2)
-    # Now make sure the indices also behave as we expect.
-    empty = np.array([])
-    assert_array_equal(bo1['ID'], empty)
-    b2 = bo2['ID']
-    b2.sort()
-    assert_array_equal(b2, i1)
-    b3 = bo3['ID']
-    b3.sort()
-    assert_array_equal(b3, ii)
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        ell1 = pf.h.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        ell2 = pf.h.ellipsoid([0.75]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        # Store the original indices
+        i1 = ell1['ID']
+        i1.sort()
+        i2 = ell2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([ell1, "AND", ell2]) # empty
+        bo2 = pf.h.boolean([ell1, "NOT", ell2]) # only ell1
+        bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = ell1['ID']
+        new_i1.sort()
+        new_i2 = ell2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1 
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
 
 def test_boolean_ellipsoids_overlap():
     r"""Test to make sure that boolean objects (ellipsoids, overlap)
@@ -276,73 +283,75 @@
 
     Test overlapping ellipsoids.
     """
-    pf = fake_random_pf(64)
-    pf.h
-    ell1 = pf.h.ellipsoid([0.45]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
-        np.array([0.1]*3))
-    ell2 = pf.h.ellipsoid([0.55]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
-        np.array([0.1]*3))
-    # Get indices of both.
-    i1 = ell1['ID']
-    i2 = ell2['ID']
-    # Make some booleans
-    bo1 = pf.h.boolean([ell1, "AND", ell2]) # overlap
-    bo2 = pf.h.boolean([ell1, "NOT", ell2]) # ell1 - ell2
-    bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
-    # Now make sure the indices also behave as we expect.
-    overlap = np.intersect1d(i1, i2)
-    diff = np.setdiff1d(i1, i2)
-    both = np.union1d(i1, i2)
-    b1 = bo1['ID']
-    b1.sort()
-    b2 = bo2['ID']
-    b2.sort()
-    b3 = bo3['ID']
-    b3.sort()
-    assert_array_equal(b1, overlap)
-    assert_array_equal(b2, diff)
-    assert_array_equal(b3, both)
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        ell1 = pf.h.ellipsoid([0.45]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        ell2 = pf.h.ellipsoid([0.55]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        # Get indices of both.
+        i1 = ell1['ID']
+        i2 = ell2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([ell1, "AND", ell2]) # overlap
+        bo2 = pf.h.boolean([ell1, "NOT", ell2]) # ell1 - ell2
+        bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+        # Now make sure the indices also behave as we expect.
+        overlap = np.intersect1d(i1, i2)
+        diff = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, overlap
+        yield assert_array_equal, b2, diff
+        yield assert_array_equal, b3, both
 
 def test_boolean_mix_periodicity():
     r"""Test that a hybrid boolean region behaves as we expect.
 
     This also tests nested logic and that periodicity works.
     """
-    pf = fake_random_pf(64)
-    pf.h
-    re = pf.h.region([0.5]*3, [0.0]*3, [1]*3) # whole thing
-    sp = pf.h.sphere([0.95]*3, 0.3) # wraps around
-    cyl = pf.h.disk([0.05]*3, [1,1,1], 0.1, 0.4) # wraps around
-    ell = pf.h.ellipsoid([0.35]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
-        np.array([0.1]*3)) # no wrap
-    # Get original indices
-    rei = re['ID']
-    spi = sp['ID']
-    cyli = cyl['ID']
-    elli = ell['ID']
-    # Make some booleans
-    # whole box minus spherical bites at corners
-    bo1 = pf.h.boolean([re, "NOT", sp])
-    # sphere plus cylinder
-    bo2 = pf.h.boolean([sp, "OR", cyl])
-    # a big jumble, the region minus the ell+cyl (scepter shaped?), plus the
-    # sphere which should add back some of what the ell+cyl took out.
-    bo3 = pf.h.boolean([re, "NOT", "(", ell, "OR", cyl, ")", "OR", sp])
-    # Now make sure the indices also behave as we expect.
-    expect = np.setdiff1d(rei, spi)
-    ii = bo1['ID']
-    ii.sort()
-    assert_array_equal(expect, ii)
-    #
-    expect = np.union1d(spi, cyli)
-    ii = bo2['ID']
-    ii.sort()
-    assert_array_equal(expect, ii)
-    #
-    expect = np.union1d(elli, cyli)
-    expect = np.setdiff1d(rei, expect)
-    expect = np.union1d(expect, spi)
-    ii = bo3['ID']
-    ii.sort()
-    assert_array_equal(expect, ii)
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re = pf.h.region([0.5]*3, [0.0]*3, [1]*3) # whole thing
+        sp = pf.h.sphere([0.95]*3, 0.3) # wraps around
+        cyl = pf.h.disk([0.05]*3, [1,1,1], 0.1, 0.4) # wraps around
+        ell = pf.h.ellipsoid([0.35]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3)) # no wrap
+        # Get original indices
+        rei = re['ID']
+        spi = sp['ID']
+        cyli = cyl['ID']
+        elli = ell['ID']
+        # Make some booleans
+        # whole box minus spherical bites at corners
+        bo1 = pf.h.boolean([re, "NOT", sp])
+        # sphere plus cylinder
+        bo2 = pf.h.boolean([sp, "OR", cyl])
+        # a big jumble, the region minus the ell+cyl (scepter shaped?), plus the
+        # sphere which should add back some of what the ell+cyl took out.
+        bo3 = pf.h.boolean([re, "NOT", "(", ell, "OR", cyl, ")", "OR", sp])
+        # Now make sure the indices also behave as we expect.
+        expect = np.setdiff1d(rei, spi)
+        ii = bo1['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+        #
+        expect = np.union1d(spi, cyli)
+        ii = bo2['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+        #
+        expect = np.union1d(elli, cyli)
+        expect = np.setdiff1d(rei, expect)
+        expect = np.union1d(expect, spi)
+        ii = bo3['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
 

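The switch from plain assert_* calls to `yield assert_array_equal, a, b` uses nose's generative-test idiom: each yielded (callable, arguments...) tuple is collected and reported as its own test case, so a failing comparison at one nprocs value does not hide the results for the others. A self-contained sketch of the idiom (names are illustrative):

    import numpy as np
    from numpy.testing import assert_array_equal

    def test_roundtrip_example():
        # nose treats each yielded tuple as a separate test:
        # the first element is the callable, the rest are its arguments
        for n in [1, 2, 4, 8]:
            a = np.arange(n)
            yield assert_array_equal, a, a.copy()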


https://bitbucket.org/yt_analysis/yt-3.0/changeset/a4bb699ba3cd/
changeset:   a4bb699ba3cd
branch:      yt
user:        sskory
date:        2012-10-24 01:16:02
summary:     Merge.
affected #:  12 files



diff -r dfaaede9f09714cd4a2caa51267fc5cbc946fe7e -r a4bb699ba3cd642a7efe76ecbf06e29a38d2a10a setup.py
--- a/setup.py
+++ b/setup.py
@@ -154,7 +154,11 @@
             'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
                             'yt = yt.utilities.command_line:run_main',
-                       ]},
+                      ],
+                      'nose.plugins.0.10': [
+                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+                      ]
+        },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
         url="http://yt-project.org/",

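Registering the plugin under the 'nose.plugins.0.10' entry-point group means nose should discover it automatically once yt is installed; by nose's usual naming convention the enabling flag would be "--with-answer-testing", alongside the --answer-compare/--answer-name/--answer-store options defined in framework.py below. The exact invocation is an inference from nose's plugin API, not something spelled out in this changeset.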

diff -r dfaaede9f09714cd4a2caa51267fc5cbc946fe7e -r a4bb699ba3cd642a7efe76ecbf06e29a38d2a10a yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -54,6 +54,7 @@
     pasteboard_repo = '',
     reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
+    test_data_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',


diff -r dfaaede9f09714cd4a2caa51267fc5cbc946fe7e -r a4bb699ba3cd642a7efe76ecbf06e29a38d2a10a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3502,10 +3502,7 @@
         for gi, g in enumerate(grids): self._grids[gi] = g
 
     def _is_fully_enclosed(self, grid):
-        r = np.abs(grid._corners - self.center)
-        r = np.minimum(r, np.abs(self.DW[None,:]-r))
-        corner_radius = np.sqrt((r**2.0).sum(axis=1))
-        return np.all(corner_radius <= self.radius)
+        return False
 
     @restore_grid_state # Pains me not to decorate with cache_mask here
     def _get_cut_mask(self, grid, field=None):


diff -r dfaaede9f09714cd4a2caa51267fc5cbc946fe7e -r a4bb699ba3cd642a7efe76ecbf06e29a38d2a10a yt/frontends/enzo/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -0,0 +1,51 @@
+"""
+Enzo frontend tests using moving7
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load
+from yt.frontends.enzo.api import EnzoStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
+           "particle_density")
+
+m7 = "DD0010/moving7_0010"
+@requires_pf(m7)
+def test_moving7():
+    pf = data_dir_load(m7)
+    yield assert_equal, str(pf), "moving7_0010"
+    for test in small_patch_amr(m7, _fields):
+        yield test
+
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+@requires_pf(g30, big_data=True)
+def test_galaxy0030():
+    pf = data_dir_load(g30)
+    yield assert_equal, str(pf), "galaxy0030"
+    for test in big_patch_amr(g30, _fields):
+        yield test


diff -r dfaaede9f09714cd4a2caa51267fc5cbc946fe7e -r a4bb699ba3cd642a7efe76ecbf06e29a38d2a10a yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -29,6 +29,15 @@
     assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
 
 def assert_rel_equal(a1, a2, decimals):
+    # We have nan checks in here because occasionally we have fields that get
+    # weighted without non-zero weights.  I'm looking at you, particle fields!
+    if isinstance(a1, np.ndarray):
+        assert(a1.size == a2.size)
+        # Mask out NaNs
+        a1[np.isnan(a1)] = 1.0
+        a2[np.isnan(a2)] = 1.0
+    elif np.isnan(a1) and np.isnan(a2):
+        return True
     return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):

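A minimal sketch of the behavior change (values illustrative): NaNs in matching positions are masked to 1.0 in both arrays before the ratio test, so they no longer poison the comparison; note the masking mutates the inputs in place:

    import numpy as np
    from yt.testing import assert_rel_equal

    a = np.array([1.0, np.nan, 3.0])
    b = np.array([1.0, np.nan, 3.0])
    assert_rel_equal(a, b, 10)  # passes; a NaN paired with a number would still fail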

diff -r dfaaede9f09714cd4a2caa51267fc5cbc946fe7e -r a4bb699ba3cd642a7efe76ecbf06e29a38d2a10a yt/utilities/answer_testing/__init__.py
--- a/yt/utilities/answer_testing/__init__.py
+++ b/yt/utilities/answer_testing/__init__.py
@@ -22,10 +22,3 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
-import runner
-import output_tests
-from runner import RegressionTestRunner
-
-from output_tests import RegressionTest, SingleOutputTest, \
-    MultipleOutputTest, YTStaticOutputTest, create_test




diff -r dfaaede9f09714cd4a2caa51267fc5cbc946fe7e -r a4bb699ba3cd642a7efe76ecbf06e29a38d2a10a yt/utilities/answer_testing/default_tests.py
--- a/yt/utilities/answer_testing/default_tests.py
+++ b/yt/utilities/answer_testing/default_tests.py
@@ -67,3 +67,4 @@
         for field in sorted(self.result):
             for p1, p2 in zip(self.result[field], old_result[field]):
                 self.compare_data_arrays(p1, p2, self.tolerance)
+


diff -r dfaaede9f09714cd4a2caa51267fc5cbc946fe7e -r a4bb699ba3cd642a7efe76ecbf06e29a38d2a10a yt/utilities/answer_testing/framework.py
--- /dev/null
+++ b/yt/utilities/answer_testing/framework.py
@@ -0,0 +1,396 @@
+"""
+Answer Testing using Nose as a starting point
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import logging
+import os
+import hashlib
+import contextlib
+import urllib2
+import cPickle
+
+from nose.plugins import Plugin
+from yt.testing import *
+from yt.config import ytcfg
+from yt.mods import *
+import cPickle
+
+from yt.utilities.logger import disable_stream_logging
+from yt.utilities.command_line import get_yt_version
+
+mylog = logging.getLogger('nose.plugins.answer-testing')
+run_big_data = False
+
+_latest = "gold001"
+_url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
+
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=_latest, help="The name against which we will compare")
+        parser.add_option("--answer-big-data", dest="big_data",
+            default=False, help="Should we run against big data, too?",
+            action="store_true")
+        parser.add_option("--answer-name", dest="this_name",
+            default=None,
+            help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+        disable_stream_logging()
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        if options.this_name is None: options.this_name = my_hash
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
+        if options.compare_name is not None:
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
+        global run_big_data
+        run_big_data = options.big_data
+
+    def finalize(self, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestOpener(object):
+    def __init__(self, reference_name):
+        self.reference_name = reference_name
+        self.cache = {}
+
+    def get(self, pf_name, default = None):
+        if pf_name in self.cache: return self.cache[pf_name]
+        url = _url_path % (self.reference_name, pf_name)
+        try:
+            resp = urllib2.urlopen(url)
+            # This is dangerous, but we have a controlled S3 environment
+            data = resp.read()
+            rv = cPickle.loads(data)
+        except urllib2.HTTPError as ex:
+            mylog.warning("Missing %s (%s)", url, ex)
+            raise YTNoOldAnswer(url)
+        self.cache[pf_name] = rv
+        return rv
+
+@contextlib.contextmanager
+def temp_cwd(cwd):
+    oldcwd = os.getcwd()
+    os.chdir(cwd)
+    yield
+    os.chdir(oldcwd)
+
+def can_run_pf(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        try:
+            load(pf_fn)
+        except:
+            return False
+    return AnswerTestingTest.result_storage is not None
+
+def data_dir_load(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        pf = load(pf_fn)
+        pf.h
+        return pf
+
+class AnswerTestingTest(object):
+    reference_storage = None
+    def __init__(self, pf_fn):
+        self.pf = data_dir_load(pf_fn)
+
+    def __call__(self):
+        nv = self.run()
+        if self.reference_storage is not None:
+            dd = self.reference_storage.get(str(self.pf))
+            if dd is None: raise YTNoOldAnswer(str(self.pf))
+            ov = dd[self.description]
+            self.compare(nv, ov)
+        else:
+            ov = None
+        self.result_storage[str(self.pf)][self.description] = nv
+
+    def compare(self, new_result, old_result):
+        raise RuntimeError
+
+    def create_obj(self, pf, obj_type):
+        # obj_type should be tuple of
+        #  ( obj_name, ( args ) )
+        if obj_type is None:
+            return pf.h.all_data()
+        cls = getattr(pf.h, obj_type[0])
+        obj = cls(*obj_type[1])
+        return obj
+
+    @property
+    def sim_center(self):
+        """
+        This returns the center of the domain.
+        """
+        return 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+
+    @property
+    def max_dens_location(self):
+        """
+        This is a helper function to return the location of the most dense
+        point.
+        """
+        return self.pf.h.find_max("Density")[1]
+
+    @property
+    def entire_simulation(self):
+        """
+        Return an unsorted array of values that cover the entire domain.
+        """
+        return self.pf.h.all_data()
+
+    @property
+    def description(self):
+        obj_type = getattr(self, "obj_type", None)
+        if obj_type is None:
+            oname = "all"
+        else:
+            oname = "_".join((str(s) for s in obj_type))
+        args = [self._type_name, str(self.pf), oname]
+        args += [str(getattr(self, an)) for an in self._attrs]
+        return "_".join(args)
+        
+class FieldValuesTest(AnswerTestingTest):
+    _type_name = "FieldValues"
+    _attrs = ("field", )
+
+    def __init__(self, pf_fn, field, obj_type = None):
+        super(FieldValuesTest, self).__init__(pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        avg = obj.quantities["WeightedAverageQuantity"](self.field,
+                             weight="Ones")
+        (mi, ma), = obj.quantities["Extrema"](self.field)
+        return np.array([avg, mi, ma])
+
+    def compare(self, new_result, old_result):
+        assert_equal(new_result, old_result)
+
+class ProjectionValuesTest(AnswerTestingTest):
+    _type_name = "ProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(ProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        return proj.field_data
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class PixelizedProjectionValuesTest(AnswerTestingTest):
+    _type_name = "PixelizedProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(PixelizedProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        frb = proj.to_frb((1.0, 'unitary'), 256)
+        frb[self.field]
+        frb[self.weight_field]
+        d = frb.data
+        d.update( dict( (("%s_sum" % f, proj[f].sum(dtype="float64"))
+                         for f in proj.field_data.keys()) ) )
+        return d
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_rel_equal(new_result[k], old_result[k], 10)
+
+class GridValuesTest(AnswerTestingTest):
+    _type_name = "GridValues"
+    _attrs = ("field",)
+
+    def __init__(self, pf_fn, field):
+        super(GridValuesTest, self).__init__(pf_fn)
+        self.field = field
+
+    def run(self):
+        hashes = {}
+        for g in self.pf.h.grids:
+            hashes[g.id] = hashlib.md5(g[self.field].tostring()).hexdigest()
+            g.clear_data()
+        return hashes
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class GridHierarchyTest(AnswerTestingTest):
+    _type_name = "GridHierarchy"
+    _attrs = ()
+
+    def run(self):
+        result = {}
+        result["grid_dimensions"] = self.pf.h.grid_dimensions
+        result["grid_left_edges"] = self.pf.h.grid_left_edge
+        result["grid_right_edges"] = self.pf.h.grid_right_edge
+        result["grid_levels"] = self.pf.h.grid_levels
+        result["grid_particle_count"] = self.pf.h.grid_particle_count
+        return result
+
+    def compare(self, new_result, old_result):
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class ParentageRelationshipsTest(AnswerTestingTest):
+    _type_name = "ParentageRelationships"
+    _attrs = ()
+    def run(self):
+        result = {}
+        result["parents"] = []
+        result["children"] = []
+        for g in self.pf.h.grids:
+            p = g.Parent
+            if p is None:
+                result["parents"].append(None)
+            elif hasattr(p, "id"):
+                result["parents"].append(p.id)
+            else:
+                result["parents"].append([pg.id for pg in p])
+            result["children"].append([c.id for c in g.Children])
+        return result
+
+    def compare(self, new_result, old_result):
+        for newp, oldp in zip(new_result["parents"], old_result["parents"]):
+            assert(newp == oldp)
+        for newc, oldc in zip(new_result["children"], old_result["children"]):
+            assert(newc == oldc)
+
+def requires_pf(pf_fn, big_data = False):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    if run_big_data == False and big_data == True:
+        return ffalse
+    elif not can_run_pf(pf_fn):
+        return ffalse
+    else:
+        return ftrue
+
+def small_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+
+def big_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)


diff -r dfaaede9f09714cd4a2caa51267fc5cbc946fe7e -r a4bb699ba3cd642a7efe76ecbf06e29a38d2a10a yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -29,14 +29,12 @@
 # We first create our dictionary of tests to run.  This starts out empty, and
 # as tests are imported it will be filled.
 if "TestRegistry" not in locals():
-    print "Initializing TestRegistry"
     class TestRegistry(dict):
         def __new__(cls, *p, **k):
             if not '_the_instance' in cls.__dict__:
                 cls._the_instance = dict.__new__(cls)
                 return cls._the_instance
 if "test_registry" not in locals():
-    print "Initializing test_registry"
     test_registry = TestRegistry()
 
 # The exceptions we raise, related to the character of the failure.


diff -r dfaaede9f09714cd4a2caa51267fc5cbc946fe7e -r a4bb699ba3cd642a7efe76ecbf06e29a38d2a10a yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -154,3 +154,11 @@
     def __str__(self):
         return "No filenames were found to match the pattern: " + \
                "'%s'" % (self.pattern)
+
+class YTNoOldAnswer(YTException):
+    def __init__(self, path):
+        self.path = path
+
+    def __str__(self):
+        return "There is no old answer available.\n" + \
+               str(self.path)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/7f963c6458bc/
changeset:   7f963c6458bc
branch:      yt
user:        sskory
date:        2012-10-24 05:04:24
summary:     Removed ellipsoid from test_boolean_mix_periodicity test because
it is problematic.
affected #:  1 file

diff -r a4bb699ba3cd642a7efe76ecbf06e29a38d2a10a -r 7f963c6458bced3770cfef7fd0dc4b85f9b2b338 yt/data_objects/tests/test_boolean_regions.py
--- a/yt/data_objects/tests/test_boolean_regions.py
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -322,21 +322,17 @@
         re = pf.h.region([0.5]*3, [0.0]*3, [1]*3) # whole thing
         sp = pf.h.sphere([0.95]*3, 0.3) # wraps around
         cyl = pf.h.disk([0.05]*3, [1,1,1], 0.1, 0.4) # wraps around
-        ell = pf.h.ellipsoid([0.35]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
-            np.array([0.1]*3)) # no wrap
         # Get original indices
         rei = re['ID']
         spi = sp['ID']
         cyli = cyl['ID']
-        elli = ell['ID']
         # Make some booleans
         # whole box minus spherical bites at corners
         bo1 = pf.h.boolean([re, "NOT", sp])
         # sphere plus cylinder
         bo2 = pf.h.boolean([sp, "OR", cyl])
-        # a big jumble, the region minus the ell+cyl (scepter shaped?), plus the
-        # sphere which should add back some of what the ell+cyl took out.
-        bo3 = pf.h.boolean([re, "NOT", "(", ell, "OR", cyl, ")", "OR", sp])
+        # a jumble, the region minus the sp+cyl
+        bo3 = pf.h.boolean([re, "NOT", "(", sp, "OR", cyl, ")"])
         # Now make sure the indices also behave as we expect.
         expect = np.setdiff1d(rei, spi)
         ii = bo1['ID']
@@ -348,9 +344,8 @@
         ii.sort()
         yield assert_array_equal, expect, ii
         #
-        expect = np.union1d(elli, cyli)
+        expect = np.union1d(spi, cyli)
         expect = np.setdiff1d(rei, expect)
-        expect = np.union1d(expect, spi)
         ii = bo3['ID']
         ii.sort()
         yield assert_array_equal, expect, ii



https://bitbucket.org/yt_analysis/yt-3.0/changeset/81d3dd529a1d/
changeset:   81d3dd529a1d
branch:      yt
user:        MatthewTurk
date:        2012-10-24 21:35:09
summary:     Merged in sskory/yt (pull request #311)
affected #:  2 files

diff -r 4c9fee8d49acfebfa2756f286d0352e85a4f6de2 -r 81d3dd529a1d278f5f3434c4e15d8336063dd0b6 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -71,7 +71,7 @@
 def force_array(item, shape):
     try:
         sh = item.shape
-        return item
+        return item.copy()
     except AttributeError:
         if item:
             return np.ones(shape, dtype='bool')

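A minimal, hypothetical illustration of the aliasing bug the one-line copy() guards against: returning the passed-in array itself lets downstream mutation corrupt shared state.

    import numpy as np

    cached_mask = np.ones(4, dtype='bool')

    def force_array_old(item, shape):
        # the pre-fix behavior: hand back the caller's array, not a copy
        return item

    m = force_array_old(cached_mask, cached_mask.shape)
    m[0] = False
    assert not cached_mask[0]  # the shared, cached mask was silently mutated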

diff -r 4c9fee8d49acfebfa2756f286d0352e85a4f6de2 -r 81d3dd529a1d278f5f3434c4e15d8336063dd0b6 yt/data_objects/tests/test_boolean_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -0,0 +1,352 @@
+from yt.testing import *
+from yt.data_objects.api import add_field
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    def _ID(field, data):
+        width = data.pf.domain_right_edge - data.pf.domain_left_edge
+        delta = width / data.pf.h.get_smallest_dx()
+        x = data['x'] - data.pf.h.get_smallest_dx() / 2.
+        y = data['y'] - data.pf.h.get_smallest_dx() / 2.
+        z = data['z'] - data.pf.h.get_smallest_dx() / 2.
+        xi = x / data.pf.h.get_smallest_dx()
+        yi = y / data.pf.h.get_smallest_dx()
+        zi = z / data.pf.h.get_smallest_dx()
+        index = xi + delta[0] * (yi + delta[1] * zi)
+        index = index.astype('int64')
+        return index
+
+    add_field("ID", function=_ID)
+
+def test_boolean_spheres_no_overlap():
+    r"""Test to make sure that boolean objects (spheres, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping spheres. This also checks that the original spheres
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        sp1 = pf.h.sphere([0.25, 0.25, 0.25], 0.15)
+        sp2 = pf.h.sphere([0.75, 0.75, 0.75], 0.15)
+        # Store the original indices
+        i1 = sp1['ID']
+        i1.sort()
+        i2 = sp2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([sp1, "AND", sp2]) # empty
+        bo2 = pf.h.boolean([sp1, "NOT", sp2]) # only sp1
+        bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = sp1['ID']
+        new_i1.sort()
+        new_i2 = sp2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+ 
+def test_boolean_spheres_overlap():
+    r"""Test to make sure that boolean objects (spheres, overlap)
+    behave the way we expect.
+
+    Test overlapping spheres.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        sp1 = pf.h.sphere([0.45, 0.45, 0.45], 0.15)
+        sp2 = pf.h.sphere([0.55, 0.55, 0.55], 0.15)
+        # Get indices of both.
+        i1 = sp1['ID']
+        i2 = sp2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([sp1, "AND", sp2]) # overlap (a lens)
+        bo2 = pf.h.boolean([sp1, "NOT", sp2]) # sp1 - sp2 (sphere with bite)
+        bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination (H2)
+        # Now make sure the indices also behave as we expect.
+        lens = np.intersect1d(i1, i2)
+        apple = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, lens
+        yield assert_array_equal, b2, apple
+        yield assert_array_equal, b3, both
+
+def test_boolean_regions_no_overlap():
+    r"""Test to make sure that boolean objects (regions, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping regions. This also checks that the original regions
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re1 = pf.h.region([0.25]*3, [0.2]*3, [0.3]*3)
+        re2 = pf.h.region([0.65]*3, [0.6]*3, [0.7]*3)
+        # Store the original indices
+        i1 = re1['ID']
+        i1.sort()
+        i2 = re2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([re1, "AND", re2]) # empty
+        bo2 = pf.h.boolean([re1, "NOT", re2]) # only re1
+        bo3 = pf.h.boolean([re1, "OR", re2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = re1['ID']
+        new_i1.sort()
+        new_i2 = re2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1 
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_regions_overlap():
+    r"""Test to make sure that boolean objects (regions, overlap)
+    behave the way we expect.
+
+    Test overlapping regions.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re1 = pf.h.region([0.55]*3, [0.5]*3, [0.6]*3)
+        re2 = pf.h.region([0.6]*3, [0.55]*3, [0.65]*3)
+        # Get indices of both.
+        i1 = re1['ID']
+        i2 = re2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([re1, "AND", re2]) # overlap (small cube)
+        bo2 = pf.h.boolean([re1, "NOT", re2]) # re1 - re2 (large cube with bite)
+        bo3 = pf.h.boolean([re1, "OR", re2]) # combination (merged large cubes)
+        # Now make sure the indices also behave as we expect.
+        cube = np.intersect1d(i1, i2)
+        bite_cube = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, cube
+        yield assert_array_equal, b2, bite_cube
+        yield assert_array_equal, b3, both
+
+def test_boolean_cylinders_no_overlap():
+    r"""Test to make sure that boolean objects (cylinders, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping cylinders. This also checks that the original cylinders
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        cyl1 = pf.h.disk([0.25]*3, [1, 0, 0], 0.1, 0.1)
+        cyl2 = pf.h.disk([0.75]*3, [1, 0, 0], 0.1, 0.1)
+        # Store the original indices
+        i1 = cyl1['ID']
+        i1.sort()
+        i2 = cyl2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # empty
+        bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # only cyl1
+        bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = cyl1['ID']
+        new_i1.sort()
+        new_i2 = cyl2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_cylinders_overlap():
+    r"""Test to make sure that boolean objects (cylinders, overlap)
+    behave the way we expect.
+
+    Test overlapping cylinders.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        cyl1 = pf.h.disk([0.45]*3, [1, 0, 0], 0.2, 0.2)
+        cyl2 = pf.h.disk([0.55]*3, [1, 0, 0], 0.2, 0.2)
+        # Get indices of both.
+        i1 = cyl1['ID']
+        i2 = cyl2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # overlap (vertically extended lens)
+        bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # cyl1 - cyl2 (disk minus a bite)
+        bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination (merged disks)
+        # Now make sure the indices also behave as we expect.
+        vlens = np.intersect1d(i1, i2)
+        bite_disk = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, vlens
+        yield assert_array_equal, b2, bite_disk
+        yield assert_array_equal, b3, both
+
+def test_boolean_ellipsoids_no_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping ellipsoids. This also checks that the original
+    ellipsoids don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        ell1 = pf.h.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        ell2 = pf.h.ellipsoid([0.75]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        # Store the original indices
+        i1 = ell1['ID']
+        i1.sort()
+        i2 = ell2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([ell1, "AND", ell2]) # empty
+        bo2 = pf.h.boolean([ell1, "NOT", ell2]) # only ell1
+        bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = ell1['ID']
+        new_i1.sort()
+        new_i2 = ell2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1 
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_ellipsoids_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, overlap)
+    behave the way we expect.
+
+    Test overlapping ellipsoids.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        ell1 = pf.h.ellipsoid([0.45]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        ell2 = pf.h.ellipsoid([0.55]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        # Get indices of both.
+        i1 = ell1['ID']
+        i2 = ell2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([ell1, "AND", ell2]) # overlap
+        bo2 = pf.h.boolean([ell1, "NOT", ell2]) # ell1 - ell2
+        bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+        # Now make sure the indices also behave as we expect.
+        overlap = np.intersect1d(i1, i2)
+        diff = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, overlap
+        yield assert_array_equal, b2, diff
+        yield assert_array_equal, b3, both
+
+def test_boolean_mix_periodicity():
+    r"""Test that a hybrid boolean region behaves as we expect.
+
+    This also tests nested logic and that periodicity works.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re = pf.h.region([0.5]*3, [0.0]*3, [1]*3) # whole thing
+        sp = pf.h.sphere([0.95]*3, 0.3) # wraps around
+        cyl = pf.h.disk([0.05]*3, [1,1,1], 0.1, 0.4) # wraps around
+        # Get original indices
+        rei = re['ID']
+        spi = sp['ID']
+        cyli = cyl['ID']
+        # Make some booleans
+        # whole box minus spherical bites at corners
+        bo1 = pf.h.boolean([re, "NOT", sp])
+        # sphere plus cylinder
+        bo2 = pf.h.boolean([sp, "OR", cyl])
+        # a jumble, the region minus the sp+cyl
+        bo3 = pf.h.boolean([re, "NOT", "(", sp, "OR", cyl, ")"])
+        # Now make sure the indices also behave as we expect.
+        expect = np.setdiff1d(rei, spi)
+        ii = bo1['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+        #
+        expect = np.union1d(spi, cyli)
+        ii = bo2['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+        #
+        expect = np.union1d(spi, cyli)
+        expect = np.setdiff1d(rei, expect)
+        ii = bo3['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+



https://bitbucket.org/yt_analysis/yt-3.0/changeset/6794b2798b5a/
changeset:   6794b2798b5a
branch:      yt
user:        MatthewTurk
date:        2012-10-24 21:58:34
summary:     Swap out get_smallest_dx for 8192.  Should fix the buildbot.
affected #:  1 file

diff -r 81d3dd529a1d278f5f3434c4e15d8336063dd0b6 -r 6794b2798b5a9c46cc505bd0d7cd271e65c82f0c yt/data_objects/tests/test_boolean_regions.py
--- a/yt/data_objects/tests/test_boolean_regions.py
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -6,13 +6,14 @@
     ytcfg["yt","__withintesting"] = "True"
     def _ID(field, data):
         width = data.pf.domain_right_edge - data.pf.domain_left_edge
-        delta = width / data.pf.h.get_smallest_dx()
-        x = data['x'] - data.pf.h.get_smallest_dx() / 2.
-        y = data['y'] - data.pf.h.get_smallest_dx() / 2.
-        z = data['z'] - data.pf.h.get_smallest_dx() / 2.
-        xi = x / data.pf.h.get_smallest_dx()
-        yi = y / data.pf.h.get_smallest_dx()
-        zi = z / data.pf.h.get_smallest_dx()
+        min_dx = 1.0/8192
+        delta = width / min_dx
+        x = data['x'] - min_dx / 2.
+        y = data['y'] - min_dx / 2.
+        z = data['z'] - min_dx / 2.
+        xi = x / min_dx
+        yi = y / min_dx
+        zi = z / min_dx
         index = xi + delta[0] * (yi + delta[1] * zi)
         index = index.astype('int64')
         return index
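
The _ID field is just a flattened 3-D index on a virtual uniform grid of
spacing min_dx, so each cell gets a unique, reproducible integer no matter
how the domain is decomposed. A quick sanity check of the formula (the
values are illustrative):

    import numpy as np

    delta = np.array([8192, 8192, 8192])  # width / min_dx for a unit box

    def flat_id(xi, yi, zi):
        return xi + delta[0] * (yi + delta[1] * zi)

    # Distinct (xi, yi, zi) triples map to distinct IDs, and the mapping
    # inverts cleanly:
    idx = flat_id(5, 7, 11)
    zi, rem = divmod(idx, delta[0] * delta[1])
    yi, xi = divmod(rem, delta[0])
    print(xi, yi, zi)  # recovers 5, 7, 11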



https://bitbucket.org/yt_analysis/yt-3.0/changeset/1922fd7145ac/
changeset:   1922fd7145ac
branch:      yt
user:        gsiisg
date:        2012-10-25 02:10:34
summary:     Updated the docstring to explain tilt more clearly, raise an exception when the user inputs A<B or B<C, and note that the new e0 will be normalized automatically
affected #:  2 files

diff -r 6794b2798b5a9c46cc505bd0d7cd271e65c82f0c -r 1922fd7145acd17f277eadc9f00220f81294a673 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3528,10 +3528,38 @@
                  pf=None, **kwargs):
         """
         By providing a *center*,*A*,*B*,*C*,*e0*,*tilt* we
-        can define a ellipsoid of any proportion.  Only cells whose centers are
-        within the ellipsoid will be selected.
+        can define an ellipsoid of any proportion.  Only cells whose
+        centers are within the ellipsoid will be selected.
+
+        Parameters
+        ----------
+        center : array_like
+            The center of the ellipsoid.
+        A : float
+            The magnitude of the largest semi-major axis of the ellipsoid.
+        B : float
+            The magnitude of the medium semi-major axis of the ellipsoid.
+        C : float
+            The magnitude of the smallest semi-major axis of the ellipsoid.
+        e0 : array_like (automatically normalized)
+            The direction of the largest semi-major axis of the ellipsoid.
+        tilt : float
+            After the rotation about the z-axis to align e0 to x in the x-y
+            plane, and then rotating about the y-axis to align e0 completely
+            to the x-axis, tilt is the angle in radians remaining to
+            rotate about the x-axis to align both e1 to the y-axis and e2 to
+            the z-axis.
+        Examples
+        --------
+        >>> pf = load("DD####/DD####")
+        >>> c = [0.5,0.5,0.5]
+        >>> ell = pf.h.ellipsoid(c, 0.1, 0.1, 0.1, np.array([0.1, 0.1, 0.1]), 0.2)
         """
+
         AMR3DData.__init__(self, np.array(center), fields, pf, **kwargs)
+        # make sure the magnitudes of semi-major axes are in order
+        if A<B or B<C:
+            raise YTEllipsoidOrdering(pf, A, B, C)
         # make sure the smallest side is not smaller than dx
         if C < self.hierarchy.get_smallest_dx():
             raise YTSphereTooSmall(pf, C, self.hierarchy.get_smallest_dx())


diff -r 6794b2798b5a9c46cc505bd0d7cd271e65c82f0c -r 1922fd7145acd17f277eadc9f00220f81294a673 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -162,3 +162,13 @@
     def __str__(self):
         return "There is no old answer available.\n" + \
                str(self.path)
+
+class YTEllipsoidOrdering(YTException):
+    def __init__(self, pf, A, B, C):
+        YTException.__init__(self, pf)
+        self._A = A
+        self._B = B
+        self._C = C
+
+    def __str__(self):
+        return "Must have A>=B>=C"



https://bitbucket.org/yt_analysis/yt-3.0/changeset/ba7fa4cdeeba/
changeset:   ba7fa4cdeeba
branch:      yt
user:        MatthewTurk
date:        2012-10-24 20:54:50
summary:     This adds an ellipsoid test script, which currently fails.
affected #:  2 files

diff -r 221b5163bffa2e478a2b65d80090adf1be45bb2e -r ba7fa4cdeeba48591c7488891046cc395713c03f yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -5,7 +5,7 @@
 Affiliation: KIPAC/SLAC/Stanford
 Author: Britton Smith <Britton.Smith at colorado.edu>
 Affiliation: University of Colorado at Boulder
-Author: Geoffrey So <gsiisg at gmail.com> (AMREllipsoidBase)
+Author: Geoffrey So <gsiisg at gmail.com>
 Affiliation: UCSD Physics/CASS
 Homepage: http://yt-project.org/
 License:


diff -r 221b5163bffa2e478a2b65d80090adf1be45bb2e -r ba7fa4cdeeba48591c7488891046cc395713c03f yt/data_objects/tests/test_ellipsoid.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -0,0 +1,32 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_ellipsoid():
+    # We decompose in different ways
+    cs = [[0.5, 0.5, 0.5],
+          [0.1, 0.2, 0.3],
+          [0.8, 0.8, 0.8]]
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs)
+        ABC = np.random.random((3, 12)) * 0.1
+        e0s = np.random.random((3, 12))
+        tilts = np.random.random((3, 12))
+        ABC[:,0] = 0.1
+
+        for i in range(12):
+            for c in cs:
+                A, B, C = reversed(sorted(ABC[:,i]))
+                e0 = e0s[:,i]
+                tilt = tilts[:,i]
+                ell = pf.h.ellipsoid(c, A, B, C, e0, tilt)
+                yield assert_equal, np.all(ell["Radius"] <= A), True
+                for i in xrange(ell["Radius"].size):
+                    pos = np.array([ell[ax][i] for ax in 'xyz'])
+                    v = 0.0
+                    v += (pos * ell._e0).sum() / ell._A
+                    v += (pos * ell._e1).sum() / ell._B
+                    v += (pos * ell._e2).sum() / ell._C
+                    yield assert_equal, (v <= 1.0), True



https://bitbucket.org/yt_analysis/yt-3.0/changeset/a086a1e8aac5/
changeset:   a086a1e8aac5
branch:      yt
user:        MatthewTurk
date:        2012-10-24 20:57:29
summary:     Fixing up ellipsoidal tests to be a lot faster.
affected #:  1 file

diff -r ba7fa4cdeeba48591c7488891046cc395713c03f -r a086a1e8aac54197d64b5a40872a67c4434a2f0c yt/data_objects/tests/test_ellipsoid.py
--- a/yt/data_objects/tests/test_ellipsoid.py
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -23,10 +23,12 @@
                 tilt = tilts[:,i]
                 ell = pf.h.ellipsoid(c, A, B, C, e0, tilt)
                 yield assert_equal, np.all(ell["Radius"] <= A), True
+                all_in = True
                 for i in xrange(ell["Radius"].size):
                     pos = np.array([ell[ax][i] for ax in 'xyz'])
                     v = 0.0
                     v += (pos * ell._e0).sum() / ell._A
                     v += (pos * ell._e1).sum() / ell._B
                     v += (pos * ell._e2).sum() / ell._C
-                    yield assert_equal, (v <= 1.0), True
+                    all_in = (v <= 1.0) and all_in
+                yield assert_equal, all_in, True



https://bitbucket.org/yt_analysis/yt-3.0/changeset/288fd314cb04/
changeset:   288fd314cb04
branch:      yt
user:        MatthewTurk
date:        2012-10-24 21:01:55
summary:     Oops: spheres could end up too small, and we don't really want to double
up our iteration index.  Also, use vectors.
affected #:  1 file

diff -r a086a1e8aac54197d64b5a40872a67c4434a2f0c -r 288fd314cb048b305dc82156594dee020e8622c0 yt/data_objects/tests/test_ellipsoid.py
--- a/yt/data_objects/tests/test_ellipsoid.py
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -2,6 +2,7 @@
 
 def setup():
     from yt.config import ytcfg
+    ytcfg["yt","loglevel"] = "50"
     ytcfg["yt","__withintesting"] = "True"
 
 def test_ellipsoid():
@@ -11,24 +12,25 @@
           [0.8, 0.8, 0.8]]
     for nprocs in [1, 2, 4, 8]:
         pf = fake_random_pf(64, nprocs = nprocs)
+        min_dx = 2.0/pf.domain_dimensions
         ABC = np.random.random((3, 12)) * 0.1
         e0s = np.random.random((3, 12))
         tilts = np.random.random((3, 12))
         ABC[:,0] = 0.1
-
         for i in range(12):
             for c in cs:
                 A, B, C = reversed(sorted(ABC[:,i]))
+                A = max(A, min_dx[0])
+                B = max(B, min_dx[1])
+                C = max(C, min_dx[2])
                 e0 = e0s[:,i]
                 tilt = tilts[:,i]
                 ell = pf.h.ellipsoid(c, A, B, C, e0, tilt)
                 yield assert_equal, np.all(ell["Radius"] <= A), True
                 all_in = True
-                for i in xrange(ell["Radius"].size):
-                    pos = np.array([ell[ax][i] for ax in 'xyz'])
-                    v = 0.0
-                    v += (pos * ell._e0).sum() / ell._A
-                    v += (pos * ell._e1).sum() / ell._B
-                    v += (pos * ell._e2).sum() / ell._C
-                    all_in = (v <= 1.0) and all_in
-                yield assert_equal, all_in, True
+                pos = np.array([ell[ax] for ax in 'xyz'])
+                v  = np.zeros_like(ell["Radius"])
+                v += (pos * ell._e0[:,None]).sum(axis=0) / ell._A
+                v += (pos * ell._e1[:,None]).sum(axis=0) / ell._B
+                v += (pos * ell._e2[:,None]).sum(axis=0) / ell._C
+                yield assert_equal, np.all(v <= 1.0), True



https://bitbucket.org/yt_analysis/yt-3.0/changeset/4df7141c25c1/
changeset:   4df7141c25c1
branch:      yt
user:        MatthewTurk
date:        2012-10-24 21:08:47
summary:     Subtract off the position
affected #:  1 file

diff -r 288fd314cb048b305dc82156594dee020e8622c0 -r 4df7141c25c1cd1bc27c770ee6a64126f8547e47 yt/data_objects/tests/test_ellipsoid.py
--- a/yt/data_objects/tests/test_ellipsoid.py
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -7,9 +7,9 @@
 
 def test_ellipsoid():
     # We decompose in different ways
-    cs = [[0.5, 0.5, 0.5],
-          [0.1, 0.2, 0.3],
-          [0.8, 0.8, 0.8]]
+    cs = [np.array([0.5, 0.5, 0.5]),
+          np.array([0.1, 0.2, 0.3]),
+          np.array([0.8, 0.8, 0.8])]
     for nprocs in [1, 2, 4, 8]:
         pf = fake_random_pf(64, nprocs = nprocs)
         min_dx = 2.0/pf.domain_dimensions
@@ -30,7 +30,7 @@
                 all_in = True
                 pos = np.array([ell[ax] for ax in 'xyz'])
                 v  = np.zeros_like(ell["Radius"])
-                v += (pos * ell._e0[:,None]).sum(axis=0) / ell._A
-                v += (pos * ell._e1[:,None]).sum(axis=0) / ell._B
-                v += (pos * ell._e2[:,None]).sum(axis=0) / ell._C
+                v += ((pos - c[:,None]) * ell._e0[:,None]).sum(axis=0) / ell._A
+                v += ((pos - c[:,None]) * ell._e1[:,None]).sum(axis=0) / ell._B
+                v += ((pos - c[:,None]) * ell._e2[:,None]).sum(axis=0) / ell._C
                 yield assert_equal, np.all(v <= 1.0), True



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9b26aeca02b5/
changeset:   9b26aeca02b5
branch:      yt
user:        MatthewTurk
date:        2012-10-24 21:10:15
summary:     Have to take the sqrt of v.
affected #:  1 file

diff -r 4df7141c25c1cd1bc27c770ee6a64126f8547e47 -r 9b26aeca02b5cd8b4f09caab3b064156242e2897 yt/data_objects/tests/test_ellipsoid.py
--- a/yt/data_objects/tests/test_ellipsoid.py
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -27,10 +27,9 @@
                 tilt = tilts[:,i]
                 ell = pf.h.ellipsoid(c, A, B, C, e0, tilt)
                 yield assert_equal, np.all(ell["Radius"] <= A), True
-                all_in = True
                 pos = np.array([ell[ax] for ax in 'xyz'])
                 v  = np.zeros_like(ell["Radius"])
                 v += ((pos - c[:,None]) * ell._e0[:,None]).sum(axis=0) / ell._A
                 v += ((pos - c[:,None]) * ell._e1[:,None]).sum(axis=0) / ell._B
                 v += ((pos - c[:,None]) * ell._e2[:,None]).sum(axis=0) / ell._C
-                yield assert_equal, np.all(v <= 1.0), True
+                yield assert_equal, np.all(np.sqrt(v) <= 1.0), True



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d58e6b99d5a3/
changeset:   d58e6b99d5a3
branch:      yt
user:        MatthewTurk
date:        2012-10-24 21:23:34
summary:     Normalize e0 in the ellipsoid constructor and use just a single float for tilt
in the tests.
affected #:  2 files

diff -r 9b26aeca02b5cd8b4f09caab3b064156242e2897 -r d58e6b99d5a3dbd004a8a50ac62e786e890854d5 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3538,7 +3538,7 @@
         self._A = A
         self._B = B
         self._C = C
-        self._e0 = e0
+        self._e0 = e0 / (e0**2.0).sum()**0.5
         self._tilt = tilt
         
         # find the t1 angle needed to rotate about z axis to align e0 to x


diff -r 9b26aeca02b5cd8b4f09caab3b064156242e2897 -r d58e6b99d5a3dbd004a8a50ac62e786e890854d5 yt/data_objects/tests/test_ellipsoid.py
--- a/yt/data_objects/tests/test_ellipsoid.py
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -15,7 +15,7 @@
         min_dx = 2.0/pf.domain_dimensions
         ABC = np.random.random((3, 12)) * 0.1
         e0s = np.random.random((3, 12))
-        tilts = np.random.random((3, 12))
+        tilts = np.random.random(12)
         ABC[:,0] = 0.1
         for i in range(12):
             for c in cs:
@@ -24,7 +24,7 @@
                 B = max(B, min_dx[1])
                 C = max(C, min_dx[2])
                 e0 = e0s[:,i]
-                tilt = tilts[:,i]
+                tilt = tilts[i]
                 ell = pf.h.ellipsoid(c, A, B, C, e0, tilt)
                 yield assert_equal, np.all(ell["Radius"] <= A), True
                 pos = np.array([ell[ax] for ax in 'xyz'])



https://bitbucket.org/yt_analysis/yt-3.0/changeset/703f3d1b8448/
changeset:   703f3d1b8448
branch:      yt
user:        MatthewTurk
date:        2012-10-24 21:25:36
summary:     Square components of v before summing
affected #:  1 file

diff -r d58e6b99d5a3dbd004a8a50ac62e786e890854d5 -r 703f3d1b8448e2f5b6609194c7c4d854f41f32c0 yt/data_objects/tests/test_ellipsoid.py
--- a/yt/data_objects/tests/test_ellipsoid.py
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -27,9 +27,9 @@
                 tilt = tilts[i]
                 ell = pf.h.ellipsoid(c, A, B, C, e0, tilt)
                 yield assert_equal, np.all(ell["Radius"] <= A), True
-                pos = np.array([ell[ax] for ax in 'xyz'])
+                p = np.array([ell[ax] for ax in 'xyz'])
                 v  = np.zeros_like(ell["Radius"])
-                v += ((pos - c[:,None]) * ell._e0[:,None]).sum(axis=0) / ell._A
-                v += ((pos - c[:,None]) * ell._e1[:,None]).sum(axis=0) / ell._B
-                v += ((pos - c[:,None]) * ell._e2[:,None]).sum(axis=0) / ell._C
+                v += (((p - c[:,None]) * ell._e0[:,None]).sum(axis=0) / ell._A)**2
+                v += (((p - c[:,None]) * ell._e1[:,None]).sum(axis=0) / ell._B)**2
+                v += (((p - c[:,None]) * ell._e2[:,None]).sum(axis=0) / ell._C)**2
                 yield assert_equal, np.all(np.sqrt(v) <= 1.0), True
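
With the squaring in place, the test now evaluates the standard ellipsoid
membership function: writing d = p - c,

    v = (np.dot(d, e0)/A)**2 + (np.dot(d, e1)/B)**2 + (np.dot(d, e2)/C)**2

and a cell center lies inside the ellipsoid exactly when v <= 1. The sqrt
in the assert is harmless, since sqrt(v) <= 1 and v <= 1 are equivalent
for nonnegative v.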



https://bitbucket.org/yt_analysis/yt-3.0/changeset/e10bee53b408/
changeset:   e10bee53b408
branch:      yt
user:        MatthewTurk
date:        2012-10-24 21:41:53
summary:     If we normalize like this, the e0 used below in the cross product will already
be normalized.  This makes all tests pass.
affected #:  1 file

diff -r 703f3d1b8448e2f5b6609194c7c4d854f41f32c0 -r e10bee53b4089b31450a0de688a932ae610e736d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3538,7 +3538,7 @@
         self._A = A
         self._B = B
         self._C = C
-        self._e0 = e0 / (e0**2.0).sum()**0.5
+        self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
         self._tilt = tilt
         
         # find the t1 angle needed to rotate about z axis to align e0 to x



https://bitbucket.org/yt_analysis/yt-3.0/changeset/189d2eace2e9/
changeset:   189d2eace2e9
branch:      yt
user:        samskillman
date:        2012-10-25 02:39:55
summary:     Merged in MatthewTurk/yt (pull request #315)
affected #:  2 files

diff -r 1922fd7145acd17f277eadc9f00220f81294a673 -r 189d2eace2e91e9ac7aeae9593fe465b50b6cf92 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -5,7 +5,7 @@
 Affiliation: KIPAC/SLAC/Stanford
 Author: Britton Smith <Britton.Smith at colorado.edu>
 Affiliation: University of Colorado at Boulder
-Author: Geoffrey So <gsiisg at gmail.com> (AMREllipsoidBase)
+Author: Geoffrey So <gsiisg at gmail.com>
 Affiliation: UCSD Physics/CASS
 Homepage: http://yt-project.org/
 License:
@@ -3566,7 +3566,7 @@
         self._A = A
         self._B = B
         self._C = C
-        self._e0 = e0
+        self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
         self._tilt = tilt
         
         # find the t1 angle needed to rotate about z axis to align e0 to x


diff -r 1922fd7145acd17f277eadc9f00220f81294a673 -r 189d2eace2e91e9ac7aeae9593fe465b50b6cf92 yt/data_objects/tests/test_ellipsoid.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -0,0 +1,35 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","loglevel"] = "50"
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_ellipsoid():
+    # We decompose in different ways
+    cs = [np.array([0.5, 0.5, 0.5]),
+          np.array([0.1, 0.2, 0.3]),
+          np.array([0.8, 0.8, 0.8])]
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs)
+        min_dx = 2.0/pf.domain_dimensions
+        ABC = np.random.random((3, 12)) * 0.1
+        e0s = np.random.random((3, 12))
+        tilts = np.random.random(12)
+        ABC[:,0] = 0.1
+        for i in range(12):
+            for c in cs:
+                A, B, C = reversed(sorted(ABC[:,i]))
+                A = max(A, min_dx[0])
+                B = max(B, min_dx[1])
+                C = max(C, min_dx[2])
+                e0 = e0s[:,i]
+                tilt = tilts[i]
+                ell = pf.h.ellipsoid(c, A, B, C, e0, tilt)
+                yield assert_equal, np.all(ell["Radius"] <= A), True
+                p = np.array([ell[ax] for ax in 'xyz'])
+                v  = np.zeros_like(ell["Radius"])
+                v += (((p - c[:,None]) * ell._e0[:,None]).sum(axis=0) / ell._A)**2
+                v += (((p - c[:,None]) * ell._e1[:,None]).sum(axis=0) / ell._B)**2
+                v += (((p - c[:,None]) * ell._e2[:,None]).sum(axis=0) / ell._C)**2
+                yield assert_equal, np.all(np.sqrt(v) <= 1.0), True



https://bitbucket.org/yt_analysis/yt-3.0/changeset/6a953d29db94/
changeset:   6a953d29db94
branch:      yt
user:        chiffre
date:        2012-10-29 14:49:37
summary:     Fixed the marker callback
affected #:  1 file

diff -r 189d2eace2e91e9ac7aeae9593fe465b50b6cf92 -r 6a953d29db94693d7f5ee950622cb291e528221d yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -743,13 +743,18 @@
         self.plot_args = plot_args
 
     def __call__(self, plot):
-        if len(self.pos) == 3:
+        xx0, xx1 = plot._axes.get_xlim()
+        yy0, yy1 = plot._axes.get_ylim()
+        if na.array(self.pos).shape == (3,):
             pos = (self.pos[x_dict[plot.data.axis]],
                    self.pos[y_dict[plot.data.axis]])
-        else: pos = self.pos
+        elif na.array(self.pos).shape == (2,):
+            pos = self.pos
         x,y = self.convert_to_plot(plot, pos)
         plot._axes.hold(True)
-        plot._axes.plot((x,),(y,),self.marker, **self.plot_args)
+        plot._axes.scatter(x,y, marker = self.marker, **self.plot_args)
+        plot._axes.set_xlim(xx0,xx1)
+        plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
 
 class SphereCallback(PlotCallback):
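
The reason for saving and restoring the limits is that Axes.scatter
updates the data limits and can trigger matplotlib's autoscaling, panning
the image out from under the marker. A minimal standalone illustration of
the pattern the patch uses (the data are made up):

    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.imshow([[0, 1], [2, 3]], extent=(0, 1, 0, 1))
    xx0, xx1 = ax.get_xlim()
    yy0, yy1 = ax.get_ylim()
    ax.scatter([0.9], [0.9], marker='x')  # may autoscale the view...
    ax.set_xlim(xx0, xx1)                 # ...so restore the saved limits
    ax.set_ylim(yy0, yy1)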



https://bitbucket.org/yt_analysis/yt-3.0/changeset/4180878cc840/
changeset:   4180878cc840
branch:      yt
user:        chiffre
date:        2012-10-29 17:00:04
summary:     Changed the prefix for numpy
affected #:  1 file

diff -r 6a953d29db94693d7f5ee950622cb291e528221d -r 4180878cc840a3443148bd9a5e755ad17643f722 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -745,10 +745,10 @@
     def __call__(self, plot):
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
-        if na.array(self.pos).shape == (3,):
+        if np.array(self.pos).shape == (3,):
             pos = (self.pos[x_dict[plot.data.axis]],
                    self.pos[y_dict[plot.data.axis]])
-        elif na.array(self.pos).shape == (2,):
+        elif np.array(self.pos).shape == (2,):
             pos = self.pos
         x,y = self.convert_to_plot(plot, pos)
         plot._axes.hold(True)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/56ad3efb61c3/
changeset:   56ad3efb61c3
branch:      yt
user:        chiffre
date:        2012-10-30 10:39:04
summary:     Changed the prefix to np and added plot_args for more control
affected #:  1 file

diff -r 4180878cc840a3443148bd9a5e755ad17643f722 -r 56ad3efb61c308414e791fc4575db3f6c6ffa469 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -374,13 +374,9 @@
 
 class StreamlineCallback(PlotCallback):
     _type_name = "streamlines"
-    def __init__(self, field_x, field_y, factor = 16,
-                 density = 1, arrowsize = 1, arrowstyle = None,
-                 color = None, normalize = False):
+    def __init__(self, field_x, field_y, factor = 16, density = 1, plot_args=None):
         """
-        annotate_streamlines(field_x, field_y, factor = 16, density = 1,
-                             arrowsize = 1, arrowstyle = None,
-                             color = None, normalize = False):
+        annotate_streamlines(field_x, field_y, factor = 16, plot_args=None):
 
         Add streamlines to any plot, using the *field_x* and *field_y*
         from the associated data, skipping every *factor* datapoints like
@@ -392,12 +388,8 @@
         self.bv_x = self.bv_y = 0
         self.factor = factor
         self.dens = density
-        self.arrowsize = arrowsize
-        if arrowstyle is None : arrowstyle='-|>'
-        self.arrowstyle = arrowstyle
-        if color is None : color = "#000000"
-        self.color = color
-        self.normalize = normalize
+        if plot_args is None: plot_args = {}
+        self.plot_args = plot_args
         
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -421,15 +413,10 @@
                              plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
                            (x0, x1, y0, y1),).transpose()
-        X,Y = (na.linspace(xx0,xx1,nx,endpoint=True),
-                          na.linspace(yy0,yy1,ny,endpoint=True))
-        if self.normalize:
-            nn = na.sqrt(pixX**2 + pixY**2)
-            pixX /= nn
-            pixY /= nn
-        plot._axes.streamplot(X,Y, pixX, pixY, density=self.dens,
-                              arrowsize=self.arrowsize, arrowstyle=self.arrowstyle,
-                              color=self.color, norm=self.normalize)
+        X,Y = (np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
+        plot._axes.streamplot(X,Y, pixX, pixY, density = self.dens,
+                              **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
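
With the simplified signature, styling that used to be a dedicated keyword
is now passed straight through to matplotlib's streamplot via plot_args.
A hypothetical call (the dataset and field names are placeholders):

    slc = SlicePlot(pf, 'z', 'Density')
    slc.annotate_streamlines('x-velocity', 'y-velocity', factor=16,
                             density=1,
                             plot_args={'color': 'k', 'arrowsize': 2,
                                        'arrowstyle': '-|>'})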



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5736bc9e7f29/
changeset:   5736bc9e7f29
branch:      yt
user:        chiffre
date:        2012-10-30 10:48:03
summary:     Fixed the docstring
affected #:  1 file

diff -r 56ad3efb61c308414e791fc4575db3f6c6ffa469 -r 5736bc9e7f29ccc6b997a4e18a9bb8aac55d46c7 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -374,9 +374,11 @@
 
 class StreamlineCallback(PlotCallback):
     _type_name = "streamlines"
-    def __init__(self, field_x, field_y, factor = 16, density = 1, plot_args=None):
+    def __init__(self, field_x, field_y, factor = 16,
+                 density = 1, plot_args=None):
         """
-        annotate_streamlines(field_x, field_y, factor = 16, plot_args=None):
+        annotate_streamlines(field_x, field_y, factor = 16,
+                             density = 1, plot_args=None):
 
         Add streamlines to any plot, using the *field_x* and *field_y*
         from the associated data, skipping every *factor* datapoints like



https://bitbucket.org/yt_analysis/yt-3.0/changeset/f77b0a03975d/
changeset:   f77b0a03975d
branch:      yt
user:        chiffre
date:        2012-10-30 19:58:19
summary:     Added an if-statement for units
affected #:  1 file

diff -r 5736bc9e7f29ccc6b997a4e18a9bb8aac55d46c7 -r f77b0a03975db973274ca3aeccc7765b840f45c4 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -783,10 +783,11 @@
             except ParseFatalException, err:
                 raise YTCannotParseFieldDisplayName(f,field_name,str(err))
 
-            try:
-                parser.parse(r'$'+md['units']+r'$')
-            except ParseFatalException, err:
-                raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
+            if md['units'] != None and md['units'] != '': 
+                try:
+                    parser.parse(r'$'+md['units']+r'$')
+                except ParseFatalException, err:
+                    raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
 
             if md['units'] == None or md['units'] == '':
                 label = field_name



https://bitbucket.org/yt_analysis/yt-3.0/changeset/8db15e870d38/
changeset:   8db15e870d38
branch:      yt
user:        chiffre
date:        2012-10-30 20:15:50
summary:     Combined the if-statements
affected #:  1 file

diff -r f77b0a03975db973274ca3aeccc7765b840f45c4 -r 8db15e870d38beaf2ee597db41f076dc0b946aa4 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -783,15 +783,13 @@
             except ParseFatalException, err:
                 raise YTCannotParseFieldDisplayName(f,field_name,str(err))
 
-            if md['units'] != None and md['units'] != '': 
+            if md['units'] == None or md['units'] == '':
+                label = field_name
+            else:
                 try:
                     parser.parse(r'$'+md['units']+r'$')
                 except ParseFatalException, err:
                     raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
-
-            if md['units'] == None or md['units'] == '':
-                label = field_name
-            else:
                 label = field_name+r'$\/\/('+md['units']+r')$'
 
             self.plots[f].cb.set_label(label)
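
The combined branch reads as a single rule: empty units mean the bare
field name is the label; otherwise the units string must parse as mathtext
before being appended. A sketch of that logic as a standalone function
(not the plot_window code itself):

    def colorbar_label(field_name, units):
        # Empty or missing units: the field name alone is the label.
        if units is None or units == '':
            return field_name
        # Otherwise append the (already validated) units in mathtext.
        return field_name + r'$\/\/(' + units + r')$'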



https://bitbucket.org/yt_analysis/yt-3.0/changeset/92234a1bc9ec/
changeset:   92234a1bc9ec
branch:      yt
user:        samskillman
date:        2012-10-26 00:46:53
summary:     Move FRBs to creating ImageArrays, decorate with basic information.
affected #:  1 file

diff -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 -r 92234a1bc9ec7e60b862e053703ae0b22aa6952c yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -29,6 +29,7 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
+from yt.data_objects.image_array import ImageArray
 import _MPL
 import numpy as np
 import weakref
@@ -133,8 +134,9 @@
                              self.bounds, int(self.antialias),
                              self._period, int(self.periodic),
                              ).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
     def __setitem__(self, item, val):
         self.data[item] = val
@@ -145,6 +147,28 @@
             if f not in exclude:
                 self[f]
 
+    def _get_info(self, item):
+        info = {}
+        info['data_source'] = self.data_source.__str__()  
+        info['axis'] = self.data_source.axis
+        info['field'] = str(item)
+        info['units'] = self.data_source.pf.field_info[item].get_units()
+        info['xlim'] = self.bounds[:2]
+        info['ylim'] = self.bounds[2:]
+        info['length_to_cm'] = self.data_source.pf['cm']
+        info['projected_units'] = \
+                self.data_source.pf.field_info[item].get_projected_units()
+        info['center'] = self.data_source.center
+        try:
+            info['coord'] = self.data_source.coord
+        except AttributeError:
+            pass
+        try:
+            info['weight_field'] = self.data_source.weight_field
+        except AttributeError:
+            pass
+        return info
+
     def convert_to_pixel(self, coords):
         r"""This function converts coordinates in code-space to pixel-space.
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/f06057cc09ed/
changeset:   f06057cc09ed
branch:      yt
user:        samskillman
date:        2012-10-26 00:48:20
summary:     Fix up streamlines a bit.
affected #:  2 files

diff -r 92234a1bc9ec7e60b862e053703ae0b22aa6952c -r f06057cc09ed9769046b349914ebeb60475ee5d3 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -749,7 +749,7 @@
         self.dts = np.empty_like(positions[:,0])
         self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
-        self.dts[-1] = self.dts[-1]
+        self.dts[-1] = self.dts[-2]
         self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])


diff -r 92234a1bc9ec7e60b862e053703ae0b22aa6952c -r f06057cc09ed9769046b349914ebeb60475ee5d3 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -118,7 +118,9 @@
         if length is None:
             length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
-        self.steps = int(length/dx)
+        self.steps = int(length/dx)+1
+        # Fix up the dx.
+        self.dx = 1.0*self.length/self.steps
         self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:
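
A quick numeric check of the fix: for length = 1.0 and dx = 0.3,
int(length/dx) is 3, which would cover only 0.9 of the requested path;
with the +1 it becomes 4 steps, and recomputing dx = 1.0*length/steps =
0.25 makes steps * dx equal the requested length exactly.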



https://bitbucket.org/yt_analysis/yt-3.0/changeset/24c207c7f548/
changeset:   24c207c7f548
branch:      yt
user:        samskillman
date:        2012-10-26 00:48:33
summary:     Merging
affected #:  20 files



diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 setup.py
--- a/setup.py
+++ b/setup.py
@@ -154,7 +154,11 @@
             'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
                             'yt = yt.utilities.command_line:run_main',
-                       ]},
+                      ],
+                      'nose.plugins.0.10': [
+                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+                      ]
+        },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
         url="http://yt-project.org/",


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -54,6 +54,7 @@
     pasteboard_repo = '',
     reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
+    test_data_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -5,7 +5,7 @@
 Affiliation: KIPAC/SLAC/Stanford
 Author: Britton Smith <Britton.Smith at colorado.edu>
 Affiliation: University of Colorado at Boulder
-Author: Geoffrey So <gsiisg at gmail.com> (AMREllipsoidBase)
+Author: Geoffrey So <gsiisg at gmail.com>
 Affiliation: UCSD Physics/CASS
 Homepage: http://yt-project.org/
 License:
@@ -71,7 +71,7 @@
 def force_array(item, shape):
     try:
         sh = item.shape
-        return item
+        return item.copy()
     except AttributeError:
         if item:
             return np.ones(shape, dtype='bool')
@@ -3502,10 +3502,7 @@
         for gi, g in enumerate(grids): self._grids[gi] = g
 
     def _is_fully_enclosed(self, grid):
-        r = np.abs(grid._corners - self.center)
-        r = np.minimum(r, np.abs(self.DW[None,:]-r))
-        corner_radius = np.sqrt((r**2.0).sum(axis=1))
-        return np.all(corner_radius <= self.radius)
+        return False
 
     @restore_grid_state # Pains me not to decorate with cache_mask here
     def _get_cut_mask(self, grid, field=None):
@@ -3531,17 +3528,45 @@
                  pf=None, **kwargs):
         """
         By providing a *center*,*A*,*B*,*C*,*e0*,*tilt* we
-        can define a ellipsoid of any proportion.  Only cells whose centers are
-        within the ellipsoid will be selected.
+        can define an ellipsoid of any proportion.  Only cells whose
+        centers are within the ellipsoid will be selected.
+
+        Parameters
+        ----------
+        center : array_like
+            The center of the ellipsoid.
+        A : float
+            The magnitude of the largest semi-major axis of the ellipsoid.
+        B : float
+            The magnitude of the medium semi-major axis of the ellipsoid.
+        C : float
+            The magnitude of the smallest semi-major axis of the ellipsoid.
+        e0 : array_like (automatically normalized)
+            The direction of the largest semi-major axis of the ellipsoid.
+        tilt : float
+            After the rotation about the z-axis to align e0 to x in the x-y
+            plane, and then rotating about the y-axis to align e0 completely
+            to the x-axis, tilt is the angle in radians remaining to
+            rotate about the x-axis to align both e1 to the y-axis and e2 to
+            the z-axis.
+        Examples
+        --------
+        >>> pf = load("DD####/DD####")
+        >>> c = [0.5,0.5,0.5]
+        >>> ell = pf.h.ellipsoid(c, 0.1, 0.1, 0.1, np.array([0.1, 0.1, 0.1]), 0.2)
         """
+
         AMR3DData.__init__(self, np.array(center), fields, pf, **kwargs)
+        # make sure the magnitudes of semi-major axes are in order
+        if A<B or B<C:
+            raise YTEllipsoidOrdering(pf, A, B, C)
         # make sure the smallest side is not smaller than dx
         if C < self.hierarchy.get_smallest_dx():
             raise YTSphereTooSmall(pf, C, self.hierarchy.get_smallest_dx())
         self._A = A
         self._B = B
         self._C = C
-        self._e0 = e0
+        self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
         self._tilt = tilt
         
         # find the t1 angle needed to rotate about z axis to align e0 to x


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/data_objects/tests/test_boolean_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -0,0 +1,353 @@
+from yt.testing import *
+from yt.data_objects.api import add_field
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    def _ID(field, data):
+        width = data.pf.domain_right_edge - data.pf.domain_left_edge
+        min_dx = 1.0/8192
+        delta = width / min_dx
+        x = data['x'] - min_dx / 2.
+        y = data['y'] - min_dx / 2.
+        z = data['z'] - min_dx / 2.
+        xi = x / min_dx
+        yi = y / min_dx
+        zi = z / min_dx
+        index = xi + delta[0] * (yi + delta[1] * zi)
+        index = index.astype('int64')
+        return index
+
+    add_field("ID", function=_ID)
+
+def test_boolean_spheres_no_overlap():
+    r"""Test to make sure that boolean objects (spheres, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping spheres. This also checks that the original spheres
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        sp1 = pf.h.sphere([0.25, 0.25, 0.25], 0.15)
+        sp2 = pf.h.sphere([0.75, 0.75, 0.75], 0.15)
+        # Store the original indices
+        i1 = sp1['ID']
+        i1.sort()
+        i2 = sp2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([sp1, "AND", sp2]) # empty
+        bo2 = pf.h.boolean([sp1, "NOT", sp2]) # only sp1
+        bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = sp1['ID']
+        new_i1.sort()
+        new_i2 = sp2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+ 
+def test_boolean_spheres_overlap():
+    r"""Test to make sure that boolean objects (spheres, overlap)
+    behave the way we expect.
+
+    Test overlapping spheres.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        sp1 = pf.h.sphere([0.45, 0.45, 0.45], 0.15)
+        sp2 = pf.h.sphere([0.55, 0.55, 0.55], 0.15)
+        # Get indices of both.
+        i1 = sp1['ID']
+        i2 = sp2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([sp1, "AND", sp2]) # overlap (a lens)
+        bo2 = pf.h.boolean([sp1, "NOT", sp2]) # sp1 - sp2 (sphere with bite)
+        bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination (H2)
+        # Now make sure the indices also behave as we expect.
+        lens = np.intersect1d(i1, i2)
+        apple = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, lens
+        yield assert_array_equal, b2, apple
+        yield assert_array_equal, b3, both
+
+def test_boolean_regions_no_overlap():
+    r"""Test to make sure that boolean objects (regions, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping regions. This also checks that the original regions
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re1 = pf.h.region([0.25]*3, [0.2]*3, [0.3]*3)
+        re2 = pf.h.region([0.65]*3, [0.6]*3, [0.7]*3)
+        # Store the original indices
+        i1 = re1['ID']
+        i1.sort()
+        i2 = re2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([re1, "AND", re2]) # empty
+        bo2 = pf.h.boolean([re1, "NOT", re2]) # only re1
+        bo3 = pf.h.boolean([re1, "OR", re2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = re1['ID']
+        new_i1.sort()
+        new_i2 = re2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1 
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_regions_overlap():
+    r"""Test to make sure that boolean objects (regions, overlap)
+    behave the way we expect.
+
+    Test overlapping regions.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re1 = pf.h.region([0.55]*3, [0.5]*3, [0.6]*3)
+        re2 = pf.h.region([0.6]*3, [0.55]*3, [0.65]*3)
+        # Get indices of both.
+        i1 = re1['ID']
+        i2 = re2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([re1, "AND", re2]) # overlap (small cube)
+        bo2 = pf.h.boolean([re1, "NOT", re2]) # re1 - re2 (large cube with bite)
+        bo3 = pf.h.boolean([re1, "OR", re2]) # combination (merged large cubes)
+        # Now make sure the indices also behave as we expect.
+        cube = np.intersect1d(i1, i2)
+        bite_cube = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, cube
+        yield assert_array_equal, b2, bite_cube
+        yield assert_array_equal, b3, both
+
+def test_boolean_cylinders_no_overlap():
+    r"""Test to make sure that boolean objects (cylinders, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping cylinders. This also checks that the original cylinders
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        cyl1 = pf.h.disk([0.25]*3, [1, 0, 0], 0.1, 0.1)
+        cyl2 = pf.h.disk([0.75]*3, [1, 0, 0], 0.1, 0.1)
+        # Store the original indices
+        i1 = cyl1['ID']
+        i1.sort()
+        i2 = cyl2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # empty
+        bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # only cyl1
+        bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = cyl1['ID']
+        new_i1.sort()
+        new_i2 = cyl2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_cylinders_overlap():
+    r"""Test to make sure that boolean objects (cylinders, overlap)
+    behave the way we expect.
+
+    Test overlapping cylinders.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        cyl1 = pf.h.disk([0.45]*3, [1, 0, 0], 0.2, 0.2)
+        cyl2 = pf.h.disk([0.55]*3, [1, 0, 0], 0.2, 0.2)
+        # Get indices of both.
+        i1 = cyl1['ID']
+        i2 = cyl2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # overlap (vertically extended lens)
+        bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # cyl1 - cyl2 (disk minus a bite)
+        bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination (merged disks)
+        # Now make sure the indices also behave as we expect.
+        vlens = np.intersect1d(i1, i2)
+        bite_disk = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, vlens
+        yield assert_array_equal, b2, bite_disk
+        yield assert_array_equal, b3, both
+
+def test_boolean_ellipsoids_no_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping ellipsoids. This also checks that the original
+    ellipsoids don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        ell1 = pf.h.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        ell2 = pf.h.ellipsoid([0.75]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        # Store the original indices
+        i1 = ell1['ID']
+        i1.sort()
+        i2 = ell2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([ell1, "AND", ell2]) # empty
+        bo2 = pf.h.boolean([ell1, "NOT", ell2]) # only ell1
+        bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = ell1['ID']
+        new_i1.sort()
+        new_i2 = ell2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1 
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_ellipsoids_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, overlap)
+    behave the way we expect.
+
+    Test overlapping ellipsoids.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        ell1 = pf.h.ellipsoid([0.45]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        ell2 = pf.h.ellipsoid([0.55]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        # Get indices of both.
+        i1 = ell1['ID']
+        i2 = ell2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([ell1, "AND", ell2]) # overlap
+        bo2 = pf.h.boolean([ell1, "NOT", ell2]) # ell1 - ell2
+        bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+        # Now make sure the indices also behave as we expect.
+        overlap = np.intersect1d(i1, i2)
+        diff = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, overlap
+        yield assert_array_equal, b2, diff
+        yield assert_array_equal, b3, both
+
+def test_boolean_mix_periodicity():
+    r"""Test that a hybrid boolean region behaves as we expect.
+
+    This also tests nested logic and that periodicity works.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re = pf.h.region([0.5]*3, [0.0]*3, [1]*3) # whole thing
+        sp = pf.h.sphere([0.95]*3, 0.3) # wraps around
+        cyl = pf.h.disk([0.05]*3, [1,1,1], 0.1, 0.4) # wraps around
+        # Get original indices
+        rei = re['ID']
+        spi = sp['ID']
+        cyli = cyl['ID']
+        # Make some booleans
+        # whole box minus spherical bites at corners
+        bo1 = pf.h.boolean([re, "NOT", sp])
+        # sphere plus cylinder
+        bo2 = pf.h.boolean([sp, "OR", cyl])
+        # a jumble, the region minus the sp+cyl
+        bo3 = pf.h.boolean([re, "NOT", "(", sp, "OR", cyl, ")"])
+        # Now make sure the indices also behave as we expect.
+        expect = np.setdiff1d(rei, spi)
+        ii = bo1['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+        #
+        expect = np.union1d(spi, cyli)
+        ii = bo2['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+        #
+        expect = np.union1d(spi, cyli)
+        expect = np.setdiff1d(rei, expect)
+        ii = bo3['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/data_objects/tests/test_ellipsoid.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -0,0 +1,35 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","loglevel"] = "50"
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_ellipsoid():
+    # We decompose in different ways
+    cs = [np.array([0.5, 0.5, 0.5]),
+          np.array([0.1, 0.2, 0.3]),
+          np.array([0.8, 0.8, 0.8])]
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs)
+        min_dx = 2.0/pf.domain_dimensions
+        ABC = np.random.random((3, 12)) * 0.1
+        e0s = np.random.random((3, 12))
+        tilts = np.random.random(12)
+        ABC[:,0] = 0.1
+        for i in range(12):
+            for c in cs:
+                A, B, C = reversed(sorted(ABC[:,i]))
+                A = max(A, min_dx[0])
+                B = max(B, min_dx[1])
+                C = max(C, min_dx[2])
+                e0 = e0s[:,i]
+                tilt = tilts[i]
+                ell = pf.h.ellipsoid(c, A, B, C, e0, tilt)
+                yield assert_equal, np.all(ell["Radius"] <= A), True
+                p = np.array([ell[ax] for ax in 'xyz'])
+                v  = np.zeros_like(ell["Radius"])
+                v += (((p - c[:,None]) * ell._e0[:,None]).sum(axis=0) / ell._A)**2
+                v += (((p - c[:,None]) * ell._e1[:,None]).sum(axis=0) / ell._B)**2
+                v += (((p - c[:,None]) * ell._e2[:,None]).sum(axis=0) / ell._C)**2
+                yield assert_equal, np.all(np.sqrt(v) <= 1.0), True
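
The membership check above is the standard ellipsoid condition evaluated in
the ellipsoid's own frame: with center $\mathbf{c}$, semi-axes $A \ge B \ge C$,
and orthonormal basis vectors $\mathbf{e}_0, \mathbf{e}_1, \mathbf{e}_2$, a
point $\mathbf{p}$ lies inside when

$$\left(\frac{(\mathbf{p}-\mathbf{c})\cdot\mathbf{e}_0}{A}\right)^2
+ \left(\frac{(\mathbf{p}-\mathbf{c})\cdot\mathbf{e}_1}{B}\right)^2
+ \left(\frac{(\mathbf{p}-\mathbf{c})\cdot\mathbf{e}_2}{C}\right)^2 \le 1,$$

which is exactly the quantity v accumulated per point in the loop.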


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/data_objects/tests/test_extract_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -0,0 +1,53 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_cut_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        r = dd.cut_region( [ "grid['Temperature'] > 0.5",
+                             "grid['Density'] < 0.75",
+                             "grid['x-velocity'] > 0.25" ])
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        t2 = (r["Temperature"] < 0.75)
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+
+def test_extract_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        r = dd.extract_region(t)
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        t2 = (r["Temperature"] < 0.75)
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+        t3 = (r["Temperature"] < 0.75)
+        r3 = r.extract_region( t3 )
+        yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
+        yield assert_equal, np.all(r3["Temperature"] < 0.75), True


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/data_objects/tests/test_ortho_rays.py
--- a/yt/data_objects/tests/test_ortho_rays.py
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -21,5 +21,5 @@
                    (np.abs(my_all[axes[my_axes[1]]] - ocoord[1]) <= 
                     0.5 * dx[my_axes[1]])
 
-        assert_equal(my_oray['Density'].sum(),
-                     my_all['Density'][my_cells].sum())
+        yield assert_equal, my_oray['Density'].sum(), \
+                            my_all['Density'][my_cells].sum()


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/data_objects/tests/test_rays.py
--- a/yt/data_objects/tests/test_rays.py
+++ b/yt/data_objects/tests/test_rays.py
@@ -1,31 +1,33 @@
 from yt.testing import *
 
 def test_ray():
-    pf = fake_random_pf(64, nprocs=8)
-    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
-      pf.domain_dimensions
+    for nproc in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=nproc)
+        dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
 
-    p1 = np.random.random(3)
-    p2 = np.random.random(3)
+        p1 = np.random.random(3)
+        p2 = np.random.random(3)
 
-    my_ray = pf.h.ray(p1, p2)
-    assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
-    ray_cells = my_ray['dts'] > 0
+        my_ray = pf.h.ray(p1, p2)
+        assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
+        ray_cells = my_ray['dts'] > 0
 
-    # find cells intersected by the ray
-    my_all = pf.h.all_data()
-    
-    dt = np.abs(dx / (p2 - p1))
-    tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
-                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
-                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
-    tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
-                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
-                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
-    tin = tin.max(axis=0)
-    tout = tout.min(axis=0)
-    my_cells = (tin < tout) & (tin < 1) & (tout > 0)
+        # find cells intersected by the ray
+        my_all = pf.h.all_data()
+        
+        dt = np.abs(dx / (p2 - p1))
+        tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
+                               [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
+                               [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
+        tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
+                               [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
+                               [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
+        tin = tin.max(axis=0)
+        tout = tout.min(axis=0)
+        my_cells = (tin < tout) & (tin < 1) & (tout > 0)
 
-    assert_rel_equal(ray_cells.sum(), my_cells.sum(), 14)
-    assert_rel_equal(my_ray['Density'][ray_cells].sum(),
-                     my_all['Density'][my_cells].sum(), 14)
+        yield assert_rel_equal, ray_cells.sum(), my_cells.sum(), 14
+        yield assert_rel_equal, my_ray['Density'][ray_cells].sum(), \
+                                my_all['Density'][my_cells].sum(), 14
+        yield assert_rel_equal, my_ray['dts'].sum(), 1.0, 14
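
The cell selection above is the usual slab test: writing the ray as
$\mathbf{p}(t) = \mathbf{p}_1 + t\,(\mathbf{p}_2 - \mathbf{p}_1)$ and
$\Delta t_i = |dx_i / (p_{2,i} - p_{1,i})|$, a cell centered at $\mathbf{x}$
has

$$t_{\rm in} = \max_i\left[\frac{x_i - p_{1,i}}{p_{2,i} - p_{1,i}}
- \frac{\Delta t_i}{2}\right], \qquad
t_{\rm out} = \min_i\left[\frac{x_i - p_{1,i}}{p_{2,i} - p_{1,i}}
+ \frac{\Delta t_i}{2}\right],$$

and is crossed exactly when $t_{\rm in} < t_{\rm out}$, $t_{\rm in} < 1$,
and $t_{\rm out} > 0$.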


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -258,8 +258,11 @@
 
         """
         if isinstance(filenames, types.StringTypes):
+            pattern = filenames
             filenames = glob.glob(filenames)
             filenames.sort()
+            if len(filenames) == 0:
+                raise YTNoFilenamesMatchPattern(pattern)
         obj = cls(filenames[:], parallel = parallel)
         return obj
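
With this guard in place, a glob pattern that matches no files now fails
loudly instead of silently building an empty time series.  A minimal sketch
of the new behavior (the from_filenames entry point and the pattern here are
illustrative):

    from yt.mods import TimeSeriesData
    from yt.utilities.exceptions import YTNoFilenamesMatchPattern

    try:
        ts = TimeSeriesData.from_filenames("DD*/no_such_output_????")
    except YTNoFilenamesMatchPattern as err:
        # "No filenames were found to match the pattern: ..."
        print err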
 


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/frontends/enzo/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -0,0 +1,51 @@
+"""
+Enzo frontend tests using moving7
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load
+from yt.frontends.enzo.api import EnzoStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
+           "particle_density")
+
+m7 = "DD0010/moving7_0010"
+@requires_pf(m7)
+def test_moving7():
+    pf = data_dir_load(m7)
+    yield assert_equal, str(pf), "moving7_0010"
+    for test in small_patch_amr(m7, _fields):
+        yield test
+
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+@requires_pf(g30, big_data=True)
+def test_galaxy0030():
+    pf = data_dir_load(g30)
+    yield assert_equal, str(pf), "galaxy0030"
+    for test in big_patch_amr(g30, _fields):
+        yield test


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -29,6 +29,15 @@
     assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
 
 def assert_rel_equal(a1, a2, decimals):
+    # We have nan checks in here because occasionally we have fields that get
+    # weighted without non-zero weights.  I'm looking at you, particle fields!
+    if isinstance(a1, np.ndarray):
+        assert(a1.size == a2.size)
+        # Mask out NaNs
+        a1[np.isnan(a1)] = 1.0
+        a2[np.isnan(a2)] = 1.0
+    elif np.isnan(a1) and np.isnan(a2):
+        return True
     return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):
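
The practical effect of the NaN masking is that NaNs occurring at the same
positions in both arrays compare as equal instead of poisoning the ratio; a
small illustration (note the helper masks the input arrays in place):

    import numpy as np
    from yt.testing import assert_rel_equal

    a = np.array([1.0, np.nan, 3.0])
    b = np.array([1.0, np.nan, 3.0])
    # Both NaNs are replaced by 1.0 before the ratio test, so this passes.
    assert_rel_equal(a, b, 10)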


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/utilities/answer_testing/__init__.py
--- a/yt/utilities/answer_testing/__init__.py
+++ b/yt/utilities/answer_testing/__init__.py
@@ -22,10 +22,3 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
-import runner
-import output_tests
-from runner import RegressionTestRunner
-
-from output_tests import RegressionTest, SingleOutputTest, \
-    MultipleOutputTest, YTStaticOutputTest, create_test




diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/utilities/answer_testing/default_tests.py
--- a/yt/utilities/answer_testing/default_tests.py
+++ b/yt/utilities/answer_testing/default_tests.py
@@ -67,3 +67,4 @@
         for field in sorted(self.result):
             for p1, p2 in zip(self.result[field], old_result[field]):
                 self.compare_data_arrays(p1, p2, self.tolerance)
+


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/utilities/answer_testing/framework.py
--- /dev/null
+++ b/yt/utilities/answer_testing/framework.py
@@ -0,0 +1,396 @@
+"""
+Answer Testing using Nose as a starting point
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import logging
+import os
+import hashlib
+import contextlib
+import urllib2
+import cPickle
+
+from nose.plugins import Plugin
+from yt.testing import *
+from yt.config import ytcfg
+from yt.mods import *
+
+from yt.utilities.logger import disable_stream_logging
+from yt.utilities.command_line import get_yt_version
+
+mylog = logging.getLogger('nose.plugins.answer-testing')
+run_big_data = False
+
+_latest = "gold001"
+_url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
+
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=_latest, help="The name against which we will compare")
+        parser.add_option("--answer-big-data", dest="big_data",
+            default=False, help="Should we run against big data, too?",
+            action="store_true")
+        parser.add_option("--answer-name", dest="this_name",
+            default=None,
+            help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+        disable_stream_logging()
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        if options.this_name is None: options.this_name = my_hash
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
+        if options.compare_name is not None:
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
+        global run_big_data
+        run_big_data = options.big_data
+
+    def finalize(self, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestOpener(object):
+    def __init__(self, reference_name):
+        self.reference_name = reference_name
+        self.cache = {}
+
+    def get(self, pf_name, default = None):
+        if pf_name in self.cache: return self.cache[pf_name]
+        url = _url_path % (self.reference_name, pf_name)
+        try:
+            resp = urllib2.urlopen(url)
+            # This is dangerous, but we have a controlled S3 environment
+            data = resp.read()
+            rv = cPickle.loads(data)
+        except urllib2.HTTPError as ex:
+            mylog.warning("Missing %s (%s)", url, ex)
+            raise YTNoOldAnswer(url)
+        self.cache[pf_name] = rv
+        return rv
+
+@contextlib.contextmanager
+def temp_cwd(cwd):
+    oldcwd = os.getcwd()
+    os.chdir(cwd)
+    try:
+        yield
+    finally:
+        # Restore the original directory even if the body raises.
+        os.chdir(oldcwd)
+
+def can_run_pf(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        try:
+            load(pf_fn)
+        except:
+            return False
+    return AnswerTestingTest.result_storage is not None
+
+def data_dir_load(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        pf = load(pf_fn)
+        pf.h
+        return pf
+
+class AnswerTestingTest(object):
+    reference_storage = None
+    def __init__(self, pf_fn):
+        self.pf = data_dir_load(pf_fn)
+
+    def __call__(self):
+        nv = self.run()
+        if self.reference_storage is not None:
+            dd = self.reference_storage.get(str(self.pf))
+            if dd is None: raise YTNoOldAnswer()
+            ov = dd[self.description]
+            self.compare(nv, ov)
+        else:
+            ov = None
+        self.result_storage[str(self.pf)][self.description] = nv
+
+    def compare(self, new_result, old_result):
+        raise RuntimeError
+
+    def create_obj(self, pf, obj_type):
+        # obj_type should be tuple of
+        #  ( obj_name, ( args ) )
+        if obj_type is None:
+            return pf.h.all_data()
+        cls = getattr(pf.h, obj_type[0])
+        obj = cls(*obj_type[1])
+        return obj
+
+    @property
+    def sim_center(self):
+        """
+        This returns the center of the domain.
+        """
+        return 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+
+    @property
+    def max_dens_location(self):
+        """
+        This is a helper function to return the location of the most dense
+        point.
+        """
+        return self.pf.h.find_max("Density")[1]
+
+    @property
+    def entire_simulation(self):
+        """
+        Return an unsorted array of values that cover the entire domain.
+        """
+        return self.pf.h.all_data()
+
+    @property
+    def description(self):
+        obj_type = getattr(self, "obj_type", None)
+        if obj_type is None:
+            oname = "all"
+        else:
+            oname = "_".join((str(s) for s in obj_type))
+        args = [self._type_name, str(self.pf), oname]
+        args += [str(getattr(self, an)) for an in self._attrs]
+        return "_".join(args)
+        
+class FieldValuesTest(AnswerTestingTest):
+    _type_name = "FieldValues"
+    _attrs = ("field", )
+
+    def __init__(self, pf_fn, field, obj_type = None):
+        super(FieldValuesTest, self).__init__(pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        avg = obj.quantities["WeightedAverageQuantity"](self.field,
+                             weight="Ones")
+        (mi, ma), = obj.quantities["Extrema"](self.field)
+        return np.array([avg, mi, ma])
+
+    def compare(self, new_result, old_result):
+        assert_equal(new_result, old_result)
+
+class ProjectionValuesTest(AnswerTestingTest):
+    _type_name = "ProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(ProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        return proj.field_data
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class PixelizedProjectionValuesTest(AnswerTestingTest):
+    _type_name = "PixelizedProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(PixelizedProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        frb = proj.to_frb((1.0, 'unitary'), 256)
+        frb[self.field]
+        frb[self.weight_field]
+        d = frb.data
+        d.update( dict( (("%s_sum" % f, proj[f].sum(dtype="float64"))
+                         for f in proj.field_data.keys()) ) )
+        return d
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_rel_equal(new_result[k], old_result[k], 10)
+
+class GridValuesTest(AnswerTestingTest):
+    _type_name = "GridValues"
+    _attrs = ("field",)
+
+    def __init__(self, pf_fn, field):
+        super(GridValuesTest, self).__init__(pf_fn)
+        self.field = field
+
+    def run(self):
+        hashes = {}
+        for g in self.pf.h.grids:
+            hashes[g.id] = hashlib.md5(g[self.field].tostring()).hexdigest()
+            g.clear_data()
+        return hashes
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class GridHierarchyTest(AnswerTestingTest):
+    _type_name = "GridHierarchy"
+    _attrs = ()
+
+    def run(self):
+        result = {}
+        result["grid_dimensions"] = self.pf.h.grid_dimensions
+        result["grid_left_edges"] = self.pf.h.grid_left_edge
+        result["grid_right_edges"] = self.pf.h.grid_right_edge
+        result["grid_levels"] = self.pf.h.grid_levels
+        result["grid_particle_count"] = self.pf.h.grid_particle_count
+        return result
+
+    def compare(self, new_result, old_result):
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class ParentageRelationshipsTest(AnswerTestingTest):
+    _type_name = "ParentageRelationships"
+    _attrs = ()
+    def run(self):
+        result = {}
+        result["parents"] = []
+        result["children"] = []
+        for g in self.pf.h.grids:
+            p = g.Parent
+            if p is None:
+                result["parents"].append(None)
+            elif hasattr(p, "id"):
+                result["parents"].append(p.id)
+            else:
+                result["parents"].append([pg.id for pg in p])
+            result["children"].append([c.id for c in g.Children])
+        return result
+
+    def compare(self, new_result, old_result):
+        for newp, oldp in zip(new_result["parents"], old_result["parents"]):
+            assert(newp == oldp)
+        for newc, oldc in zip(new_result["children"], old_result["children"]):
+            assert(newc == oldc)
+
+def requires_pf(pf_fn, big_data = False):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    if big_data and not run_big_data:
+        return ffalse
+    elif not can_run_pf(pf_fn):
+        return ffalse
+    else:
+        return ftrue
+
+def small_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+
+def big_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
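
For orientation, a new answer test under this framework needs only a
_type_name, the _attrs that make its description unique, a run() returning a
picklable result, and a compare(); a minimal hypothetical example:

    from yt.testing import assert_rel_equal
    from yt.utilities.answer_testing.framework import AnswerTestingTest

    class MaxDensityTest(AnswerTestingTest):
        _type_name = "MaxDensity"
        _attrs = ()

        def run(self):
            # Whatever run() returns is stored, and later compared, as
            # the answer for this parameter file.
            return self.pf.h.find_max("Density")[0]

        def compare(self, new_result, old_result):
            assert_rel_equal(new_result, old_result, 10)

Instances yielded from a test function gated by requires_pf are then picked
up by nose whenever the plugin is enabled.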


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -29,14 +29,12 @@
 # We first create our dictionary of tests to run.  This starts out empty, and
 # as tests are imported it will be filled.
 if "TestRegistry" not in locals():
-    print "Initializing TestRegistry"
     class TestRegistry(dict):
         def __new__(cls, *p, **k):
             if not '_the_instance' in cls.__dict__:
                 cls._the_instance = dict.__new__(cls)
                 return cls._the_instance
 if "test_registry" not in locals():
-    print "Initializing test_registry"
     test_registry = TestRegistry()
 
 # The exceptions we raise, related to the character of the failure.


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -146,3 +146,29 @@
     def __str__(self):
         return "You must create an API key before uploading.  See " + \
                "https://data.yt-project.org/getting_started.html"
+
+class YTNoFilenamesMatchPattern(YTException):
+    def __init__(self, pattern):
+        self.pattern = pattern
+
+    def __str__(self):
+        return "No filenames were found to match the pattern: " + \
+               "'%s'" % (self.pattern)
+
+class YTNoOldAnswer(YTException):
+    def __init__(self, path):
+        self.path = path
+
+    def __str__(self):
+        return "There is no old answer available.\n" + \
+               str(self.path)
+
+class YTEllipsoidOrdering(YTException):
+    def __init__(self, pf, A, B, C):
+        YTException.__init__(self, pf)
+        self._A = A
+        self._B = B
+        self._C = C
+
+    def __str__(self):
+        return "Must have A>=B>=C"


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -7,6 +7,8 @@
 Affiliation: UC Berkeley
 Author: Stephen Skory <s at skory.us>
 Affiliation: UC San Diego
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: The University of Chicago
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008-2011 Matthew Turk, JS Oishi, Stephen Skory.  All Rights Reserved.
@@ -211,7 +213,7 @@
 class ContourCallback(PlotCallback):
     _type_name = "contour"
     def __init__(self, field, ncont=5, factor=4, clim=None,
-                 plot_args = None):
+                 plot_args = None, label = False, label_args = None):
         """
         annotate_contour(self, field, ncont=5, factor=4, take_log=False, clim=None,
                          plot_args = None):
@@ -230,6 +232,10 @@
         self.clim = clim
         if plot_args is None: plot_args = {'colors':'k'}
         self.plot_args = plot_args
+        self.label = label
+        if label_args is None:
+            label_args = {}
+        self.label_args = label_args
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -288,12 +294,16 @@
             self.clim = (np.log10(self.clim[0]), np.log10(self.clim[1]))
         
         if self.clim is not None: 
-            self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
+            self.ncont = np.linspace(self.clim[0], self.clim[1], self.ncont)
         
-        plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
+        cset = plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
+        
+        if self.label:
+            plot._axes.clabel(cset, **self.label_args)
+        
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
@@ -440,30 +450,6 @@
         plot._axes.set_xlabel(self.label)
         plot._axes.set_ylabel(self.label)
 
-class TimeCallback(PlotCallback):
-    _type_name = "time"
-    def __init__(self, format_code='10.7e'):
-        """
-        This annotates the plot with the current simulation time.
-        For now, the time is displayed in seconds.
-        *format_code* can be optionally set, allowing a custom 
-        c-style format code for the time display.
-        """
-        self.format_code = format_code
-        PlotCallback.__init__(self)
-    
-    def __call__(self, plot):
-        current_time = plot.pf.current_time/plot.pf['Time']
-        timestring = format(current_time,self.format_code)
-        base = timestring[:timestring.find('e')]
-        exponent = timestring[timestring.find('e')+1:]
-        if exponent[0] == '+':
-            exponent = exponent[1:]
-        timestring = r'$t\/=\/'+base+''+r'\times\,10^{'+exponent+r'}\, \rm{s}$'
-        from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
-        at = AnchoredText(timestring, prop=dict(size=12), frameon=True, loc=4)
-        plot._axes.add_artist(at)
-
 def get_smallest_appropriate_unit(v, pf):
     max_nu = 1e30
     good_u = None
@@ -1103,3 +1089,152 @@
     def __call__(self,plot):
         plot._axes.set_title(self.title)
 
+class FlashRayDataCallback(PlotCallback):
+    _type_name = "flash_ray_data"
+    def __init__(self, cmap_name='bone', sample=None):
+        """ 
+        annotate_flash_ray_data(cmap_name='bone', sample=None)
+
+        Adds ray trace data to the plot.  *cmap_name* is the name of the color map 
+        ('bone', 'jet', 'hot', etc).  *sample* dictates the amount of downsampling
+        applied so that not every ray is plotted.  This may be None
+        (plot all rays, default), an integer (step size), or a slice object.
+        """
+        self.cmap_name = cmap_name
+        self.sample = sample if isinstance(sample, slice) else slice(None, None, sample)
+
+    def __call__(self, plot):
+        ray_data = plot.data.pf._handle["RayData"][:]
+        idx = ray_data[:,0].argsort(kind="mergesort")
+        ray_data = ray_data[idx]
+
+        tags = ray_data[:,0]
+        coords = ray_data[:,1:3]
+        power = ray_data[:,4]
+        power /= power.max()
+        cx, cy = self.convert_to_plot(plot, coords.T)
+        coords[:,0], coords[:,1] = cx, cy
+        splitidx = np.argwhere(0 < (tags[1:] - tags[:-1])) + 1
+        coords = np.split(coords, splitidx.flat)[self.sample]
+        power = np.split(power, splitidx.flat)[self.sample]
+        cmap = matplotlib.cm.get_cmap(self.cmap_name)
+
+        plot._axes.hold(True)
+        colors = [cmap(p.max()) for p in power]
+        lc = matplotlib.collections.LineCollection(coords, colors=colors)
+        plot._axes.add_collection(lc)
+        plot._axes.hold(False)
+
+
+class TimestampCallback(PlotCallback):
+    _type_name = "timestamp"
+    _time_conv = {
+          'as': 1e-18,
+          'attosec': 1e-18,
+          'attosecond': 1e-18,
+          'attoseconds': 1e-18,
+          'fs': 1e-15,
+          'femtosec': 1e-15,
+          'femtosecond': 1e-15,
+          'femtoseconds': 1e-15,
+          'ps': 1e-12,
+          'picosec': 1e-12,
+          'picosecond': 1e-12,
+          'picoseconds': 1e-12,
+          'ns': 1e-9,
+          'nanosec': 1e-9,
+          'nanosecond':1e-9,
+          'nanoseconds' : 1e-9,
+          'us': 1e-6,
+          'microsec': 1e-6,
+          'microsecond': 1e-6,
+          'microseconds': 1e-6,
+          'ms': 1e-3,
+          'millisec': 1e-3,
+          'millisecond': 1e-3,
+          'milliseconds': 1e-3,
+          's': 1.0,
+          'sec': 1.0,
+          'second':1.0,
+          'seconds': 1.0,
+          'm': 60.0,
+          'min': 60.0,
+          'minute': 60.0,
+          'minutes': 60.0,
+          'h': 3600.0,
+          'hour': 3600.0,
+          'hours': 3600.0,
+          'd': 86400.0,
+          'day': 86400.0,
+          'days': 86400.0,
+          'y': 86400.0*365.25,
+          'year': 86400.0*365.25,
+          'years': 86400.0*365.25,
+          'ev': 1e-9 * 7.6e-8 / 6.03,
+          'kev': 1e-12 * 7.6e-8 / 6.03,
+          'mev': 1e-15 * 7.6e-8 / 6.03,
+          }
+
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", **kwargs):
+        """ 
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
+
+        Adds the current time to the plot at the point given by *x* and *y*.  If
+        *units* is given ('s', 'ms', 'ns', etc), it will convert the time to this
+        basis.  If *units* is None, it will attempt to figure out the correct value by which to
+        scale.  The *format* keyword is a template string that will be evaluated and 
+        displayed on the plot.  All other *kwargs* will be passed to the text() 
+        method on the plot axes.  See matplotlib's text() functions for more 
+        information.
+        """
+        self.x = x
+        self.y = y
+        self.format = format
+        self.units = units
+        self.kwargs = {'color': 'w'}
+        self.kwargs.update(kwargs)
+
+    def __call__(self, plot):
+        if self.units is None:
+            t = plot.data.pf.current_time
+            scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']
+            self.units = 's'
+            for k in scale_keys:
+                if t < self._time_conv[k]:
+                    break
+                self.units = k
+        t = plot.data.pf.current_time / self._time_conv[self.units.lower()]
+        if self.units == 'us':
+            self.units = '$\\mu s$'
+        s = self.format.format(time=t, units=self.units)
+        plot._axes.hold(True)
+        plot._axes.text(self.x, self.y, s, **self.kwargs)
+        plot._axes.hold(False)
+
+
+class MaterialBoundaryCallback(ContourCallback):
+    _type_name = "material_boundary"
+    def __init__(self, field='targ', ncont=1, factor=4, clim=(0.9, 1.0), **kwargs):
+        """ 
+        annotate_material_boundary(self, field='targ', ncont=1, factor=4, 
+                                   clim=(0.9, 1.0), **kwargs):
+
+        Add the limiting contours of *field* to the plot.  Nominally, *field* is 
+        the target material but may be any other field present in the hierarchy.
+        The number of contours generated is given by *ncont*, *factor* governs
+        the number of points used in the interpolation, and *clim* gives the
+        (lower, upper) limits for contouring.  For this to truly be the boundary,
+        *clim* should be close to the edge.  For example, the default is (0.9, 1.0)
+        for 'targ' which is defined on the range [0.0, 1.0].  All other *kwargs* 
+        will be passed to the contour() method on the plot axes.  See matplotlib
+        for more information.
+        """
+        plot_args = {'colors': 'w'}
+        plot_args.update(kwargs)
+        super(MaterialBoundaryCallback, self).__init__(field=field, ncont=ncont,
+                                                       factor=factor, clim=clim,
+                                                       plot_args=plot_args)
+
+    def __call__(self, plot):
+        super(MaterialBoundaryCallback, self).__call__(plot)
+
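
Because callbacks register under their _type_name, the new ones surface as
annotate_* methods on plot objects.  A sketch of the timestamp callback in
use (the dataset and plot construction are illustrative):

    from yt.mods import load, SlicePlot

    pf = load("DD0010/moving7_0010")   # illustrative dataset
    p = SlicePlot(pf, 'z', 'Density')
    # x and y are handed straight to matplotlib's text(); extra kwargs
    # (here the color) are forwarded as well.
    p.annotate_timestamp(0.2, 0.8, units='ns', color='y')
    p.save()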


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1449,7 +1449,7 @@
             yield self.snapshot()
 
 def allsky_projection(pf, center, radius, nside, field, weight = None,
-                      inner_radius = 10, rotation = None):
+                      inner_radius = 10, rotation = None, source = None):
     r"""Project through a parameter file, through an allsky-method
     decomposition from HEALpix, and return the image plane.
 
@@ -1484,6 +1484,9 @@
         If supplied, the vectors will be rotated by this.  You can construct
         this by, for instance, calling np.array([v1,v2,v3]) where those are the
         three reference planes of an orthogonal frame (see ortho_find).
+    source : data container, default None
+        If supplied, this gives the data source from which the all-sky
+        projection pulls its data.
 
     Returns
     -------
@@ -1527,12 +1530,20 @@
     positions += inner_radius * dx * vs
     vs *= radius
     uv = np.ones(3, dtype='float64')
-    grids = pf.h.sphere(center, radius)._grids
+    if source is not None:
+        grids = source._grids
+    else:
+        grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
                                 image, uv, uv, np.zeros(3, dtype='float64'))
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
-        data = [grid[field] * grid.child_mask.astype('float64')
+        if source is not None:
+            data = [grid[field] * source._get_cut_mask(grid) * \
+                grid.child_mask.astype('float64')
+                for field in fields]
+        else:
+            data = [grid[field] * grid.child_mask.astype('float64')
                 for field in fields]
         pg = PartitionedGrid(
             grid.id, data,
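
A sketch of the new source keyword, which lets an arbitrary data container
stand in for the default sphere when selecting grids and cut masks (the
dataset name is illustrative):

    from yt.mods import load
    from yt.visualization.volume_rendering.camera import allsky_projection

    pf = load("DD0010/moving7_0010")   # illustrative dataset
    dense = pf.h.sphere([0.5]*3, 0.25)
    # Only cells belonging to `dense` contribute to the projection.
    image = allsky_projection(pf, [0.5]*3, 0.2, 64, "Density",
                              source=dense)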



https://bitbucket.org/yt_analysis/yt-3.0/changeset/30892e3e9006/
changeset:   30892e3e9006
branch:      yt
user:        MatthewTurk
date:        2012-10-26 13:25:21
summary:     Sum the dts in a given cell.  It's important to note that this does not (yet)
correctly convey when a cell has been visited multiple times.
affected #:  2 files

diff -r 189d2eace2e91e9ac7aeae9593fe465b50b6cf92 -r 30892e3e900620f83eeb338202309c488bcff06e yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -788,11 +788,10 @@
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
             for j in range(3):
                 ci[j] = min(ci[j], grid.ActiveDimensions[j]-1)
-            if mask[ci[0], ci[1], ci[2]]:
-                continue
+            if not mask[ci[0], ci[1], ci[2]]:
+                ts[ci[0], ci[1], ci[2]] = self.ts[i]
             mask[ci[0], ci[1], ci[2]] = 1
-            dts[ci[0], ci[1], ci[2]] = self.dts[i]
-            ts[ci[0], ci[1], ci[2]] = self.ts[i]
+            dts[ci[0], ci[1], ci[2]] += self.dts[i]
         self._dts[grid.id] = dts
         self._ts[grid.id] = ts
         return mask


diff -r 189d2eace2e91e9ac7aeae9593fe465b50b6cf92 -r 30892e3e900620f83eeb338202309c488bcff06e yt/data_objects/tests/test_streamlines.py
--- /dev/null
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -0,0 +1,22 @@
+from yt.testing import *
+from yt.visualization.api import Streamlines
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_streamlines():
+    # We decompose in different ways
+    cs = np.mgrid[0.47:0.53:2j,0.47:0.53:2j,0.47:0.53:2j]
+    cs = np.array([a.ravel() for a in cs]).T
+    length = (1.0/128) * 16 # 16 half-widths of a cell
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs, fields = _fields)
+        streams = Streamlines(pf, cs)
+        streams.integrate_through_volume()
+        for path in (streams.path(i) for i in range(8)):
+            yield assert_rel_equal, path['dts'].sum(), 1.0, 14
+            assert_rel_equal(path['dts'].sum(), 1.0, 14)
+            yield assert_array_less, path['t'], 1.0



https://bitbucket.org/yt_analysis/yt-3.0/changeset/6ae44687aeca/
changeset:   6ae44687aeca
branch:      yt
user:        MatthewTurk
date:        2012-10-26 13:26:33
summary:     Merging with Sam's changes
affected #:  3 files

diff -r 30892e3e900620f83eeb338202309c488bcff06e -r 6ae44687aeca2f8cae9269865be39c9a8cfffe3a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -749,7 +749,7 @@
         self.dts = np.empty_like(positions[:,0])
         self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
-        self.dts[-1] = self.dts[-1]
+        self.dts[-1] = self.dts[-2]
         self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])


diff -r 30892e3e900620f83eeb338202309c488bcff06e -r 6ae44687aeca2f8cae9269865be39c9a8cfffe3a yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -29,6 +29,7 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
+from yt.data_objects.image_array import ImageArray
 import _MPL
 import numpy as np
 import weakref
@@ -133,8 +134,9 @@
                              self.bounds, int(self.antialias),
                              self._period, int(self.periodic),
                              ).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
     def __setitem__(self, item, val):
         self.data[item] = val
@@ -145,6 +147,28 @@
             if f not in exclude:
                 self[f]
 
+    def _get_info(self, item):
+        info = {}
+        info['data_source'] = self.data_source.__str__()  
+        info['axis'] = self.data_source.axis
+        info['field'] = str(item)
+        info['units'] = self.data_source.pf.field_info[item].get_units()
+        info['xlim'] = self.bounds[:2]
+        info['ylim'] = self.bounds[2:]
+        info['length_to_cm'] = self.data_source.pf['cm']
+        info['projected_units'] = \
+                self.data_source.pf.field_info[item].get_projected_units()
+        info['center'] = self.data_source.center
+        try:
+            info['coord'] = self.data_source.coord
+        except AttributeError:
+            pass
+        try:
+            info['weight_field'] = self.data_source.weight_field
+        except AttributeError:
+            pass
+        return info
+
     def convert_to_pixel(self, coords):
         r"""This function converts coordinates in code-space to pixel-space.
 


diff -r 30892e3e900620f83eeb338202309c488bcff06e -r 6ae44687aeca2f8cae9269865be39c9a8cfffe3a yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -118,7 +118,9 @@
         if length is None:
             length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
-        self.steps = int(length/dx)
+        self.steps = int(length/dx)+1
+        # Fix up the dx.
+        self.dx = 1.0*self.length/self.steps
         self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:



https://bitbucket.org/yt_analysis/yt-3.0/changeset/a67f87642c83/
changeset:   a67f87642c83
branch:      yt
user:        MatthewTurk
date:        2012-10-26 13:43:09
summary:     Switch out the call to Streamlines to a call to the hierarchy.  Add a path
normalization option to get back the correct dts in a streamline.  Tests all
pass.
affected #:  3 files

diff -r 6ae44687aeca2f8cae9269865be39c9a8cfffe3a -r a67f87642c8306f18102c905f4cc3c663e1782b8 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -709,7 +709,7 @@
     _type_name = "streamline"
     _con_args = ('positions')
     sort_by = 't'
-    def __init__(self, positions, fields=None, pf=None, **kwargs):
+    def __init__(self, positions, length = 1.0, fields=None, pf=None, **kwargs):
         """
         This is a streamline, which is a set of points defined as
         being parallel to some vector field.
@@ -725,6 +725,8 @@
         ----------
         positions : array-like
             List of streamline positions
+        length : float
+            The magnitude of the distance; dts will be divided by this
         fields : list of strings, optional
             If you want the object to pre-retrieve a set of fields, supply them
             here.  This is not necessary.
@@ -750,6 +752,8 @@
         self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
         self.dts[-1] = self.dts[-2]
+        self.length = length
+        self.dts /= length
         self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])


diff -r 6ae44687aeca2f8cae9269865be39c9a8cfffe3a -r a67f87642c8306f18102c905f4cc3c663e1782b8 yt/data_objects/tests/test_streamlines.py
--- a/yt/data_objects/tests/test_streamlines.py
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -14,9 +14,8 @@
     length = (1.0/128) * 16 # 16 half-widths of a cell
     for nprocs in [1, 2, 4, 8]:
         pf = fake_random_pf(64, nprocs = nprocs, fields = _fields)
-        streams = Streamlines(pf, cs)
+        streams = Streamlines(pf, cs, length=length)
         streams.integrate_through_volume()
         for path in (streams.path(i) for i in range(8)):
             yield assert_rel_equal, path['dts'].sum(), 1.0, 14
-            assert_rel_equal(path['dts'].sum(), 1.0, 14)
-            yield assert_array_less, path['t'], 1.0
+            yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True


diff -r 6ae44687aeca2f8cae9269865be39c9a8cfffe3a -r a67f87642c8306f18102c905f4cc3c663e1782b8 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -208,5 +208,6 @@
         >>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
         
         """
-        return AMRStreamlineBase(self.streamlines[streamline_id], pf=self.pf)
+        return self.pf.h.streamline(self.streamlines[streamline_id],
+                                    length = self.length)
         



https://bitbucket.org/yt_analysis/yt-3.0/changeset/b54dc03a34ee/
changeset:   b54dc03a34ee
branch:      yt
user:        MatthewTurk
date:        2012-10-26 14:12:59
summary:     Allow cells to appear multiple times in a streamline.  This can probably be
vectorized.
affected #:  2 files

diff -r a67f87642c8306f18102c905f4cc3c663e1782b8 -r b54dc03a34eec3fe3b472dcb993bd93dbd5f8f54 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -772,30 +772,30 @@
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = np.logical_and(self._get_cut_mask(grid),
-                              grid.child_mask)
-        if field == 'dts': return self._dts[grid.id][mask]
-        if field == 't': return self._ts[grid.id][mask]
-        return grid[field][mask]
+        # No child masking here; it happens inside the mask cut
+        mask = self._get_cut_mask(grid) 
+        if field == 'dts': return self._dts[grid.id]
+        if field == 't': return self._ts[grid.id]
+        return grid[field].flat[mask]
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = np.zeros(grid.ActiveDimensions, dtype='int')
-        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         #pdb.set_trace()
         points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
                          np.all(self.positions <= grid.RightEdge, axis=1) 
         pids = np.where(points_in_grid)[0]
-        for i, pos in zip(pids, self.positions[points_in_grid]):
+        mask = np.zeros(points_in_grid.sum(), dtype='int')
+        dts = np.zeros(points_in_grid.sum(), dtype='float64')
+        ts = np.zeros(points_in_grid.sum(), dtype='float64')
+        for mi, (i, pos) in enumerate(zip(pids, self.positions[points_in_grid])):
             if not points_in_grid[i]: continue
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
+            if grid.child_mask[ci[0], ci[1], ci[2]] == 0: continue
             for j in range(3):
                 ci[j] = min(ci[j], grid.ActiveDimensions[j]-1)
-            if not mask[ci[0], ci[1], ci[2]]:
-                ts[ci[0], ci[1], ci[2]] = self.ts[i]
-            mask[ci[0], ci[1], ci[2]] = 1
-            dts[ci[0], ci[1], ci[2]] += self.dts[i]
+            mask[mi] = np.ravel_multi_index(ci, grid.ActiveDimensions)
+            dts[mi] = self.dts[i]
+            ts[mi] = self.ts[i]
         self._dts[grid.id] = dts
         self._ts[grid.id] = ts
         return mask


diff -r a67f87642c8306f18102c905f4cc3c663e1782b8 -r b54dc03a34eec3fe3b472dcb993bd93dbd5f8f54 yt/data_objects/tests/test_streamlines.py
--- a/yt/data_objects/tests/test_streamlines.py
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -19,3 +19,4 @@
         for path in (streams.path(i) for i in range(8)):
             yield assert_rel_equal, path['dts'].sum(), 1.0, 14
             yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True
+            path["Density"]



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3d8fd5d2b393/
changeset:   3d8fd5d2b393
branch:      yt
user:        samskillman
date:        2012-10-30 20:46:57
summary:     Fixing weight field issue.
affected #:  1 file

diff -r b54dc03a34eec3fe3b472dcb993bd93dbd5f8f54 -r 3d8fd5d2b393a0f07df381ec534d88f64bad97c1 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -164,7 +164,9 @@
         except AttributeError:
             pass
         try:
-            info['weight_field'] = self.data_source.weight_field
+            weight = self.data_source.weight_field
+            if weight is None: weight = 'None'
+            info['weight_field'] = weight
         except AttributeError:
             pass
         return info



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3972786cc04e/
changeset:   3972786cc04e
branch:      yt
user:        samskillman
date:        2012-10-30 21:21:35
summary:     Adding tests for FRB-generated ImageArrays, and adding a test_slice.py that does not yet verify the slice's values.
affected #:  3 files
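
The new assertions exercise the provenance dict that to_frb() now attaches
to each image.  Roughly, the pattern being tested (a sketch using the names
from the diffs below):

from yt.testing import *
pf = fake_random_pf(64)
slc = pf.h.slice(0, 0.5, ["Ones", "Density"])
frb = slc.to_frb((1.0, 'unitary'), 64)
img = frb['Density']          # an ImageArray carrying an info dict
print img.info['units'], img.info['axis'], img.info['data_source']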

diff -r 3d8fd5d2b393a0f07df381ec534d88f64bad97c1 -r 3972786cc04e9e66747a2f49196675e45ee66408 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,6 +1,4 @@
 from yt.testing import *
-from yt.data_objects.profiles import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
 
 def setup():
     from yt.config import ytcfg
@@ -32,8 +30,30 @@
                 yield assert_equal, np.unique(proj["py"]), uc[yax]
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = proj.to_frb((1.0,'unitary'), 64)
+                for proj_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[proj_field].info['data_source'], \
+                            proj.__str__()
+                    yield assert_equal, frb[proj_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[proj_field].info['field'], \
+                            proj_field
+                    yield assert_equal, frb[proj_field].info['units'], \
+                            pf.field_info[proj_field].get_units()
+                    yield assert_equal, frb[proj_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[proj_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[proj_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[proj_field].info['center'], \
+                            proj.center
+                    yield assert_equal, frb[proj_field].info['weight_field'], \
+                            wf
             # wf == None
             yield assert_equal, wf, None
             v1 = proj["Density"].sum()
             v2 = (dd["Density"] * dd["d%s" % an]).sum()
             yield assert_rel_equal, v1, v2, 10
+
+


diff -r 3d8fd5d2b393a0f07df381ec534d88f64bad97c1 -r 3972786cc04e9e66747a2f49196675e45ee66408 yt/data_objects/tests/test_slice.py
--- /dev/null
+++ b/yt/data_objects/tests/test_slice.py
@@ -0,0 +1,55 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_slice():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        xn, yn, zn = pf.domain_dimensions
+        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
+        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        uc = [np.unique(c) for c in coords]
+        slc_pos = 0.5
+        # Some simple slice tests with single grids
+        for ax, an in enumerate("xyz"):
+            xax = x_dict[ax]
+            yax = y_dict[ax]
+            for wf in ["Density", None]:
+                slc = pf.h.slice(ax, slc_pos, ["Ones", "Density"])
+                yield assert_equal, slc["Ones"].sum(), slc["Ones"].size
+                yield assert_equal, slc["Ones"].min(), 1.0
+                yield assert_equal, slc["Ones"].max(), 1.0
+                yield assert_equal, np.unique(slc["px"]), uc[xax]
+                yield assert_equal, np.unique(slc["py"]), uc[yax]
+                yield assert_equal, np.unique(slc["pdx"]), 1.0/(dims[xax]*2.0)
+                yield assert_equal, np.unique(slc["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = slc.to_frb((1.0,'unitary'), 64)
+                for slc_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[slc_field].info['data_source'], \
+                            slc.__str__()
+                    yield assert_equal, frb[slc_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[slc_field].info['field'], \
+                            slc_field
+                    yield assert_equal, frb[slc_field].info['units'], \
+                            pf.field_info[slc_field].get_units()
+                    yield assert_equal, frb[slc_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[slc_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[slc_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[slc_field].info['center'], \
+                            slc.center
+                    yield assert_equal, frb[slc_field].info['coord'], \
+                            slc_pos
+            # wf == None
+            yield assert_equal, wf, None
+
+


diff -r 3d8fd5d2b393a0f07df381ec534d88f64bad97c1 -r 3972786cc04e9e66747a2f49196675e45ee66408 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -164,9 +164,7 @@
         except AttributeError:
             pass
         try:
-            weight = self.data_source.weight_field
-            if weight is None: weight = 'None'
-            info['weight_field'] = weight
+            info['weight_field'] = self.data_source.weight_field
         except AttributeError:
             pass
         return info



https://bitbucket.org/yt_analysis/yt-3.0/changeset/7211b81e0ac6/
changeset:   7211b81e0ac6
branch:      yt
user:        MatthewTurk
date:        2012-10-30 21:34:32
summary:     Merged in samskillman/yt (pull request #317)
affected #:  6 files

diff -r 8db15e870d38beaf2ee597db41f076dc0b946aa4 -r 7211b81e0ac61db6682f987a2838caa6bb743e16 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -709,7 +709,7 @@
     _type_name = "streamline"
     _con_args = ('positions')
     sort_by = 't'
-    def __init__(self, positions, fields=None, pf=None, **kwargs):
+    def __init__(self, positions, length = 1.0, fields=None, pf=None, **kwargs):
         """
         This is a streamline, which is a set of points defined as
         being parallel to some vector field.
@@ -725,6 +725,8 @@
         ----------
         positions : array-like
             List of streamline positions
+        length : float
+            The magnitude of the distance; dts will be divided by this
         fields : list of strings, optional
             If you want the object to pre-retrieve a set of fields, supply them
             here.  This is not necessary.
@@ -749,7 +751,9 @@
         self.dts = np.empty_like(positions[:,0])
         self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
-        self.dts[-1] = self.dts[-1]
+        self.dts[-1] = self.dts[-2]
+        self.length = length
+        self.dts /= length
         self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])
@@ -768,31 +772,30 @@
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = np.logical_and(self._get_cut_mask(grid),
-                              grid.child_mask)
-        if field == 'dts': return self._dts[grid.id][mask]
-        if field == 't': return self._ts[grid.id][mask]
-        return grid[field][mask]
+        # No child masking here; it happens inside the mask cut
+        mask = self._get_cut_mask(grid) 
+        if field == 'dts': return self._dts[grid.id]
+        if field == 't': return self._ts[grid.id]
+        return grid[field].flat[mask]
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = np.zeros(grid.ActiveDimensions, dtype='int')
-        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         #pdb.set_trace()
         points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
                          np.all(self.positions <= grid.RightEdge, axis=1) 
         pids = np.where(points_in_grid)[0]
-        for i, pos in zip(pids, self.positions[points_in_grid]):
+        mask = np.zeros(points_in_grid.sum(), dtype='int')
+        dts = np.zeros(points_in_grid.sum(), dtype='float64')
+        ts = np.zeros(points_in_grid.sum(), dtype='float64')
+        for mi, (i, pos) in enumerate(zip(pids, self.positions[points_in_grid])):
             if not points_in_grid[i]: continue
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
+            if grid.child_mask[ci[0], ci[1], ci[2]] == 0: continue
             for j in range(3):
                 ci[j] = min(ci[j], grid.ActiveDimensions[j]-1)
-            if mask[ci[0], ci[1], ci[2]]:
-                continue
-            mask[ci[0], ci[1], ci[2]] = 1
-            dts[ci[0], ci[1], ci[2]] = self.dts[i]
-            ts[ci[0], ci[1], ci[2]] = self.ts[i]
+            mask[mi] = np.ravel_multi_index(ci, grid.ActiveDimensions)
+            dts[mi] = self.dts[i]
+            ts[mi] = self.ts[i]
         self._dts[grid.id] = dts
         self._ts[grid.id] = ts
         return mask


diff -r 8db15e870d38beaf2ee597db41f076dc0b946aa4 -r 7211b81e0ac61db6682f987a2838caa6bb743e16 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,6 +1,4 @@
 from yt.testing import *
-from yt.data_objects.profiles import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
 
 def setup():
     from yt.config import ytcfg
@@ -32,8 +30,30 @@
                 yield assert_equal, np.unique(proj["py"]), uc[yax]
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = proj.to_frb((1.0,'unitary'), 64)
+                for proj_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[proj_field].info['data_source'], \
+                            proj.__str__()
+                    yield assert_equal, frb[proj_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[proj_field].info['field'], \
+                            proj_field
+                    yield assert_equal, frb[proj_field].info['units'], \
+                            pf.field_info[proj_field].get_units()
+                    yield assert_equal, frb[proj_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[proj_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[proj_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[proj_field].info['center'], \
+                            proj.center
+                    yield assert_equal, frb[proj_field].info['weight_field'], \
+                            wf
             # wf == None
             yield assert_equal, wf, None
             v1 = proj["Density"].sum()
             v2 = (dd["Density"] * dd["d%s" % an]).sum()
             yield assert_rel_equal, v1, v2, 10
+
+


diff -r 8db15e870d38beaf2ee597db41f076dc0b946aa4 -r 7211b81e0ac61db6682f987a2838caa6bb743e16 yt/data_objects/tests/test_slice.py
--- /dev/null
+++ b/yt/data_objects/tests/test_slice.py
@@ -0,0 +1,55 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_slice():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        xn, yn, zn = pf.domain_dimensions
+        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
+        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        uc = [np.unique(c) for c in coords]
+        slc_pos = 0.5
+        # Some simple slice tests with single grids
+        for ax, an in enumerate("xyz"):
+            xax = x_dict[ax]
+            yax = y_dict[ax]
+            for wf in ["Density", None]:
+                slc = pf.h.slice(ax, slc_pos, ["Ones", "Density"])
+                yield assert_equal, slc["Ones"].sum(), slc["Ones"].size
+                yield assert_equal, slc["Ones"].min(), 1.0
+                yield assert_equal, slc["Ones"].max(), 1.0
+                yield assert_equal, np.unique(slc["px"]), uc[xax]
+                yield assert_equal, np.unique(slc["py"]), uc[yax]
+                yield assert_equal, np.unique(slc["pdx"]), 1.0/(dims[xax]*2.0)
+                yield assert_equal, np.unique(slc["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = slc.to_frb((1.0,'unitary'), 64)
+                for slc_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[slc_field].info['data_source'], \
+                            slc.__str__()
+                    yield assert_equal, frb[slc_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[slc_field].info['field'], \
+                            slc_field
+                    yield assert_equal, frb[slc_field].info['units'], \
+                            pf.field_info[slc_field].get_units()
+                    yield assert_equal, frb[slc_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[slc_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[slc_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[slc_field].info['center'], \
+                            slc.center
+                    yield assert_equal, frb[slc_field].info['coord'], \
+                            slc_pos
+            # wf == None
+            yield assert_equal, wf, None
+
+


diff -r 8db15e870d38beaf2ee597db41f076dc0b946aa4 -r 7211b81e0ac61db6682f987a2838caa6bb743e16 yt/data_objects/tests/test_streamlines.py
--- /dev/null
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -0,0 +1,22 @@
+from yt.testing import *
+from yt.visualization.api import Streamlines
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_covering_grid():
+    # We decompose in different ways
+    cs = np.mgrid[0.47:0.53:2j,0.47:0.53:2j,0.47:0.53:2j]
+    cs = np.array([a.ravel() for a in cs]).T
+    length = (1.0/128) * 16 # 16 half-widths of a cell
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs, fields = _fields)
+        streams = Streamlines(pf, cs, length=length)
+        streams.integrate_through_volume()
+        for path in (streams.path(i) for i in range(8)):
+            yield assert_rel_equal, path['dts'].sum(), 1.0, 14
+            yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True
+            path["Density"]


diff -r 8db15e870d38beaf2ee597db41f076dc0b946aa4 -r 7211b81e0ac61db6682f987a2838caa6bb743e16 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -29,6 +29,7 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
+from yt.data_objects.image_array import ImageArray
 import _MPL
 import numpy as np
 import weakref
@@ -133,8 +134,9 @@
                              self.bounds, int(self.antialias),
                              self._period, int(self.periodic),
                              ).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
     def __setitem__(self, item, val):
         self.data[item] = val
@@ -145,6 +147,28 @@
             if f not in exclude:
                 self[f]
 
+    def _get_info(self, item):
+        info = {}
+        info['data_source'] = self.data_source.__str__()  
+        info['axis'] = self.data_source.axis
+        info['field'] = str(item)
+        info['units'] = self.data_source.pf.field_info[item].get_units()
+        info['xlim'] = self.bounds[:2]
+        info['ylim'] = self.bounds[2:]
+        info['length_to_cm'] = self.data_source.pf['cm']
+        info['projected_units'] = \
+                self.data_source.pf.field_info[item].get_projected_units()
+        info['center'] = self.data_source.center
+        try:
+            info['coord'] = self.data_source.coord
+        except AttributeError:
+            pass
+        try:
+            info['weight_field'] = self.data_source.weight_field
+        except AttributeError:
+            pass
+        return info
+
     def convert_to_pixel(self, coords):
         r"""This function converts coordinates in code-space to pixel-space.
 


diff -r 8db15e870d38beaf2ee597db41f076dc0b946aa4 -r 7211b81e0ac61db6682f987a2838caa6bb743e16 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -118,7 +118,9 @@
         if length is None:
             length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
-        self.steps = int(length/dx)
+        self.steps = int(length/dx)+1
+        # Fix up the dx.
+        self.dx = 1.0*self.length/self.steps
         self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:
@@ -206,5 +208,6 @@
         >>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
         
         """
-        return AMRStreamlineBase(self.streamlines[streamline_id], pf=self.pf)
+        return self.pf.h.streamline(self.streamlines[streamline_id],
+                                    length = self.length)
         



https://bitbucket.org/yt_analysis/yt-3.0/changeset/505e955ac0d2/
changeset:   505e955ac0d2
branch:      yt
user:        chiffre
date:        2012-10-31 13:50:35
summary:     Fixed LaTeX syntax
affected #:  2 files

diff -r 8db15e870d38beaf2ee597db41f076dc0b946aa4 -r 505e955ac0d2f6545d90d591dbee05694f950bc8 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -99,8 +99,8 @@
         add_field(fn1.split("_")[0] + "_Density",
                   function=_get_density(fn1), take_log=True,
                   display_name="%s\/Density" % fn1.split("_")[0],
-                  units = r"\rm{g}/\rm{cm}^3",
-                  projected_units = r"\rm{g}/\rm{cm}^2",
+                  units = r"\rm{g}/\rm{cm}^{3}",
+                  projected_units = r"\rm{g}/\rm{cm}^{2}",
                   )
 
 def _get_convert(fname):
@@ -110,8 +110,8 @@
 
 add_flash_field("dens", function=NullFunc, take_log=True,
                 convert_function=_get_convert("dens"),
-                units=r"\rm{g}/\rm{cm}^3",
-                projected_units = r"\rm{g}/\rm{cm}^2"),
+                units=r"\rm{g}/\rm{cm}^{3}",
+                projected_units = r"\rm{g}/\rm{cm}^{2}"),
 add_flash_field("velx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("velx"),
                 units=r"\rm{cm}/\rm{s}")
@@ -159,10 +159,10 @@
                 units = r"\rm{K}")
 add_flash_field("pres", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pres"),
-                units=r"\rm{erg}\//\/\rm{cm}^{3}")
+                units=r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("pden", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pden"),
-                units=r"\rm{g}/\rm{cm}^3")
+                units=r"\rm{g}/\rm{cm}^{3}")
 add_flash_field("magx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("magx"),
                 units = r"\mathrm{Gau\ss}")
@@ -174,7 +174,7 @@
                 units = r"\mathrm{Gau\ss}")
 add_flash_field("magp", function=NullFunc, take_log=True,
                 convert_function=_get_convert("magp"),
-                units = r"\rm{erg}\//\/\rm{cm}^{3}")
+                units = r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("divb", function=NullFunc, take_log=False,
                 convert_function=_get_convert("divb"),
                 units = r"\mathrm{Gau\ss}\/\rm{cm}")
@@ -186,10 +186,10 @@
                 units=r"\rm{ratio\/of\/specific\/heats}")
 add_flash_field("gpot", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpot"),
-                units=r"\rm{ergs\//\/g}")
+                units=r"\rm{ergs}/\rm{g}")
 add_flash_field("gpol", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpol"),
-                units = r"\rm{ergs\//\/g}")
+                units = r"\rm{ergs}/\rm{g}")
 add_flash_field("flam", function=NullFunc, take_log=False,
                 convert_function=_get_convert("flam"))
 


diff -r 8db15e870d38beaf2ee597db41f076dc0b946aa4 -r 505e955ac0d2f6545d90d591dbee05694f950bc8 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -773,9 +773,10 @@
             field_name = self.data_source.pf.field_info[f].display_name
 
             if field_name is None:
-                field_name = r'$\rm{'+f+r'}$'
+                field_name = f
             elif field_name.find('$') == -1:
-                field_name = r'$\rm{'+field_name+r'}$'
+                field_name = field_name
+            field_name=r'$\rm{'+field_name+r'}$'
             
             parser = MathTextParser('Agg')
             try:



https://bitbucket.org/yt_analysis/yt-3.0/changeset/59059c2db4b8/
changeset:   59059c2db4b8
branch:      yt
user:        chiffre
date:        2012-10-31 14:05:26
summary:     Reverting an incorrect change
affected #:  1 file

diff -r 505e955ac0d2f6545d90d591dbee05694f950bc8 -r 59059c2db4b8c47ece031f2a6c5a0c0660820814 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -773,10 +773,9 @@
             field_name = self.data_source.pf.field_info[f].display_name
 
             if field_name is None:
-                field_name = f
+                field_name = r'$\rm{'+f+r'}$'
             elif field_name.find('$') == -1:
-                field_name = field_name
-            field_name=r'$\rm{'+field_name+r'}$'
+                field_name = r'$\rm{'+field_name+r'}$'
             
             parser = MathTextParser('Agg')
             try:



https://bitbucket.org/yt_analysis/yt-3.0/changeset/a9f28dd0dcb9/
changeset:   a9f28dd0dcb9
branch:      yt
user:        MatthewTurk
date:        2012-10-31 15:53:54
summary:     Merged in chiffre/yt (pull request #321)
affected #:  2 files

diff -r 7211b81e0ac61db6682f987a2838caa6bb743e16 -r a9f28dd0dcb90d1d75ec39844c3d36ecae989ee5 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -99,8 +99,8 @@
         add_field(fn1.split("_")[0] + "_Density",
                   function=_get_density(fn1), take_log=True,
                   display_name="%s\/Density" % fn1.split("_")[0],
-                  units = r"\rm{g}/\rm{cm}^3",
-                  projected_units = r"\rm{g}/\rm{cm}^2",
+                  units = r"\rm{g}/\rm{cm}^{3}",
+                  projected_units = r"\rm{g}/\rm{cm}^{2}",
                   )
 
 def _get_convert(fname):
@@ -110,8 +110,8 @@
 
 add_flash_field("dens", function=NullFunc, take_log=True,
                 convert_function=_get_convert("dens"),
-                units=r"\rm{g}/\rm{cm}^3",
-                projected_units = r"\rm{g}/\rm{cm}^2"),
+                units=r"\rm{g}/\rm{cm}^{3}",
+                projected_units = r"\rm{g}/\rm{cm}^{2}"),
 add_flash_field("velx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("velx"),
                 units=r"\rm{cm}/\rm{s}")
@@ -159,10 +159,10 @@
                 units = r"\rm{K}")
 add_flash_field("pres", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pres"),
-                units=r"\rm{erg}\//\/\rm{cm}^{3}")
+                units=r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("pden", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pden"),
-                units=r"\rm{g}/\rm{cm}^3")
+                units=r"\rm{g}/\rm{cm}^{3}")
 add_flash_field("magx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("magx"),
                 units = r"\mathrm{Gau\ss}")
@@ -174,7 +174,7 @@
                 units = r"\mathrm{Gau\ss}")
 add_flash_field("magp", function=NullFunc, take_log=True,
                 convert_function=_get_convert("magp"),
-                units = r"\rm{erg}\//\/\rm{cm}^{3}")
+                units = r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("divb", function=NullFunc, take_log=False,
                 convert_function=_get_convert("divb"),
                 units = r"\mathrm{Gau\ss}\/\rm{cm}")
@@ -186,10 +186,10 @@
                 units=r"\rm{ratio\/of\/specific\/heats}")
 add_flash_field("gpot", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpot"),
-                units=r"\rm{ergs\//\/g}")
+                units=r"\rm{ergs}/\rm{g}")
 add_flash_field("gpol", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpol"),
-                units = r"\rm{ergs\//\/g}")
+                units = r"\rm{ergs}/\rm{g}")
 add_flash_field("flam", function=NullFunc, take_log=False,
                 convert_function=_get_convert("flam"))
 





https://bitbucket.org/yt_analysis/yt-3.0/changeset/a7c6365c00fa/
changeset:   a7c6365c00fa
branch:      yt
user:        MatthewTurk
date:        2012-11-01 23:46:13
summary:     Adding load_amr_grids to the Stream frontend.  This works like
load_uniform_grid.  See the docstring for an example.
affected #:  2 files

diff -r a9f28dd0dcb90d1d75ec39844c3d36ecae989ee5 -r a7c6365c00fab74098d33a34cfe3cc8f993f5541 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -29,7 +29,8 @@
       StreamHierarchy, \
       StreamStaticOutput, \
       StreamHandler, \
-      load_uniform_grid
+      load_uniform_grid, \
+      load_amr_grids
 
 from .fields import \
       KnownStreamFields, \


diff -r a9f28dd0dcb90d1d75ec39844c3d36ecae989ee5 -r a7c6365c00fab74098d33a34cfe3cc8f993f5541 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -395,3 +395,103 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def load_amr_grids(grid_data, domain_dimensions, sim_unit_to_cm, bbox=None,
+                   sim_time=0.0, number_of_particles=0):
+    r"""Load a set of grids of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a sequence of grids of varying resolution of data to be
+    loaded directly into yt and analyzed as would any others.  This comes with
+    several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+        * No consistency checks are performed on the hierarchy
+
+    Parameters
+    ----------
+    grid_data : list of dicts
+        This is a list of dicts.  Each dict must have entries "left_edge",
+        "right_edge", "dimensions", "level", and then any remaining entries are
+        assumed to be fields.  This will be modified in place and can't be
+        assumed to be static.
+    domain_dimensions : array_like
+        This is the domain dimensions of the grid
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of the computational domain, in units of sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    number_of_particles : int, optional
+        If particle fields are included, set this to the number of particles
+
+    Examples
+    --------
+
+    >>> grid_data = [
+    ...     dict(left_edge = [0.0, 0.0, 0.0],
+    ...          right_edge = [1.0, 1.0, 1.],
+    ...          level = 0,
+    ...          dimensions = [32, 32, 32]),
+    ...     dict(left_edge = [0.25, 0.25, 0.25],
+    ...          right_edge = [0.75, 0.75, 0.75],
+    ...          level = 1,
+    ...          dimensions = [32, 32, 32])
+    ... ]
+    ... 
+    >>> for g in grid_data:
+    ...     g["Density"] = np.random.random(g["dimensions"]) * 2**g["level"]
+    ...
+    >>> pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)
+    """
+
+    domain_dimensions = np.array(domain_dimensions)
+    ngrids = len(grid_data)
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros((ngrids, 1), dtype='int32')
+    grid_left_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_right_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
+    sfh = StreamDictFieldHandler()
+    for i, g in enumerate(grid_data):
+        grid_left_edges[i,:] = g.pop("left_edge")
+        grid_right_edges[i,:] = g.pop("right_edge")
+        grid_dimensions[i,:] = g.pop("dimensions")
+        grid_levels[i,:] = g.pop("level")
+        sfh[i] = g
+
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        None, # parent_ids is none
+        number_of_particles*np.ones(ngrids, dtype='int64').reshape(ngrids,1),
+        np.zeros(ngrids).reshape((ngrids,1)),
+        sfh,
+    )
+
+    handler.name = "AMRGridData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+    return spf



https://bitbucket.org/yt_analysis/yt-3.0/changeset/f227fad041ca/
changeset:   f227fad041ca
branch:      yt
user:        MatthewTurk
date:        2012-11-02 19:17:46
summary:     Backporting the IPython detection from a PR
affected #:  1 file
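
IPython injects __IPYTHON__ into the __builtin__ namespace at startup, so
checking for it is a reliable in-session test.  Distilled (a sketch; Python 2,
as in the diff):

import __builtin__

def within_ipython():
    # True only when running inside an IPython session
    return "__IPYTHON__" in dir(__builtin__)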

diff -r a7c6365c00fab74098d33a34cfe3cc8f993f5541 -r f227fad041cad0fe4e0ae8ee3e2661f97ce97d15 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import __builtin__
 import time, types, signal, inspect, traceback, sys, pdb, os
 import contextlib
 import warnings, struct, subprocess
@@ -310,7 +311,7 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       ytcfg.getboolean("yt", "ipython_notebook") or \
+       "__IPYTHON__" in dir(__builtin__) or \
        ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d1f7280c3c79/
changeset:   d1f7280c3c79
branch:      yt
user:        ngoldbaum
date:        2012-11-02 20:35:37
summary:     Adding some new features for the timestamp callback.
affected #:  1 file
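
In use, the new keywords might look like this (a hypothetical call; normalized
and bbox_dict are the parameters added below, and the bbox values echo the
class default):

slc = SlicePlot(pf, "x", "Density")
slc.annotate_timestamp(0.05, 0.95, units="years", normalized=True,
                       bbox_dict={'boxstyle': 'square,pad=0.6',
                                  'fc': 'white', 'ec': 'black', 'alpha': 1.0})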

diff -r f227fad041cad0fe4e0ae8ee3e2661f97ce97d15 -r d1f7280c3c79607e859605e02e5a296c2acfc4b6 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -1168,8 +1168,10 @@
           'kev': 1e-12 * 7.6e-8 / 6.03,
           'mev': 1e-15 * 7.6e-8 / 6.03,
           }
+    _bbox_dict = {'boxstyle': 'square,pad=0.6', 'fc': 'white', 'ec': 'black', 'alpha': 1.0}
 
-    def __init__(self, x, y, units=None, format="{time:.3G} {units}", **kwargs):
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized = False, 
+                 bbox_dict = None, **kwargs):
         """ 
         annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
 
@@ -1177,32 +1179,47 @@
         is given ('s', 'ms', 'ns', etc.), it will convert the time to this basis.  If 
         *units* is None, it will attempt to figure out the correct value by which to 
         scale.  The *format* keyword is a template string that will be evaluated and 
-        displayed on the plot.  All other *kwargs* will be passed to the text() 
-        method on the plot axes.  See matplotlib's text() functions for more 
-        information.
+        displayed on the plot.  If *normalized* is true, *x* and *y* are interpreted 
+        as normalized plot coordinates (0,0 is lower-left and 1,1 is upper-right); 
+        otherwise *x* and *y* are assumed to be in plot coordinates. The *bbox_dict* 
+        is an optional dict of arguments for the bbox that frames the timestamp, see 
+        matplotlib's text annotation guide for more details. All other *kwargs* will 
+        be passed to the text() method on the plot axes.  See matplotlib's text() 
+        functions for more information.
         """
         self.x = x
         self.y = y
         self.format = format
         self.units = units
+        self.normalized = normalized
+        if bbox_dict is not None:
+            self.bbox_dict = bbox_dict
+        else:
+            self.bbox_dict = self._bbox_dict
         self.kwargs = {'color': 'w'}
         self.kwargs.update(kwargs)
 
     def __call__(self, plot):
         if self.units is None:
-            t = plot.data.pf.current_time
+            t = plot.data.pf.current_time * plot.data.pf['Time']
             scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']
             self.units = 's'
             for k in scale_keys:
                 if t < self._time_conv[k]:
                     break
                 self.units = k
-        t = plot.data.pf.current_time / self._time_conv[self.units.lower()]
+        t = plot.data.pf.current_time * plot.data.pf['Time'] 
+        t /= self._time_conv[self.units.lower()]
         if self.units == 'us':
             self.units = '$\\mu s$'
         s = self.format.format(time=t, units=self.units)
         plot._axes.hold(True)
-        plot._axes.text(self.x, self.y, s, **self.kwargs)
+        if self.normalized:
+            plot._axes.text(self.x, self.y, s, horizontalalignment='center',
+                            verticalalignment='center', 
+                            transform = plot._axes.transAxes, bbox=self.bbox_dict)
+        else:
+            plot._axes.text(self.x, self.y, s, bbox=self.bbox_dict, **self.kwargs)
         plot._axes.hold(False)
 
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/663fdee24975/
changeset:   663fdee24975
branch:      yt
user:        MatthewTurk
date:        2012-11-01 21:34:28
summary:     Adding a TimeSeriesController class, which gets added to plot windows by
default.  This lets you increment and decrement your plot window dataset
pointer.  For instance:

from yt.mods import *
ts = TimeSeriesData.from_filenames("enzo_tiny_cosmology/DD*/*.hierarchy")
sl = SlicePlot(ts, "x", "Density")
sl.annotate_timestamp(-0.5, -0.4, units="years", size=24)
for i in range(len(ts)):
    sl.show()
    sl.controller.inc()
affected #:  3 files

diff -r b54dc03a34eec3fe3b472dcb993bd93dbd5f8f54 -r 663fdee249754c2827db0a2a47748b6a9cc7acce yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import __builtin__
 import time, types, signal, inspect, traceback, sys, pdb, os
 import contextlib
 import warnings, struct, subprocess
@@ -310,7 +311,7 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       ytcfg.getboolean("yt", "ipython_notebook") or \
+       "__IPYTHON__" in dir(__builtin__) or \
        ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):


diff -r b54dc03a34eec3fe3b472dcb993bd93dbd5f8f54 -r 663fdee249754c2827db0a2a47748b6a9cc7acce yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -1058,3 +1058,47 @@
                 nextdim = (nextdim + 1) % 3
         return cuts
     
+class GroupOwnership(ParallelAnalysisInterface):
+    def __init__(self, items):
+        ParallelAnalysisInterface.__init__(self)
+        self.num_items = len(items)
+        self.items = items
+        assert(self.num_items >= self.comm.size)
+        self.owned = range(self.comm.size)
+        self.pointer = 0
+        if parallel_capable:
+            communication_system.push_with_ids(range(self.size))
+
+    def __del__(self):
+        if parallel_capable:
+            communication_system.pop()
+
+    def inc(self, n=1):
+        old_item = self.item
+        for i in range(n):
+            if self.pointer >= self.num_items - self.comm.size: break
+            self.owned[self.pointer % self.comm.size] += self.comm.size
+            self.pointer += 1
+        if self.item is not old_item:
+            self.switch()
+            
+    def dec(self, n=1):
+        old_item = self.item
+        for i in range(n):
+            if self.pointer == 0: break
+            self.owned[(self.pointer - 1) % self.comm.size] -= self.comm.size
+            self.pointer -= 1
+        if self.item is not old_item:
+            self.switch()
+
+    _last = None
+    @property
+    def item(self):
+        own = self.owned[self.comm.rank]
+        if self._last != own:
+            self._item = self.items[own]
+            self._last = own
+        return self._item
+
+    def switch(self):
+        pass


diff -r b54dc03a34eec3fe3b472dcb993bd93dbd5f8f54 -r 663fdee249754c2827db0a2a47748b6a9cc7acce yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -60,6 +60,10 @@
     axis_labels
 from yt.utilities.math_utils import \
     ortho_find
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    GroupOwnership
+from yt.data_objects.time_series import \
+    TimeSeriesData
 
 def invalidate_data(f):
     @wraps(f)
@@ -254,6 +258,13 @@
             self.set_center(center)
         self._initfinished = True
 
+    def _initialize_dataset(self, ts):
+        if not isinstance(ts, TimeSeriesData):
+            if not iterable(ts): ts = [ts]
+            ts = TimeSeriesData(ts)
+        self.controller = TimeSeriesPlotController(ts, self)
+        return self.controller.item
+
     def __getitem__(self, item):
         return self.plots[item]
 
@@ -987,6 +998,8 @@
         >>> p.save('sliceplot')
         
         """
+        # This will handle time series data and controllers
+        pf = self._initialize_dataset(pf) 
         axis = fix_axis(axis)
         (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis], fields=fields)
@@ -1493,3 +1506,18 @@
                                       vmax = self.zmax, cmap = cmap)
         self.image.axes.ticklabel_format(scilimits=(-4,3))
 
+class TimeSeriesPlotController(GroupOwnership):
+    def __init__(self, ts, pw):
+        GroupOwnership.__init__(self, ts)
+        self.pw = pw
+
+    def switch(self):
+        ds = self.pw.data_source
+        new_pf = self.item
+        name = ds._type_name
+        kwargs = dict((n, getattr(ds, n)) for n in ds._con_args)
+        new_ds = getattr(new_pf.h, name)(**kwargs)
+        self.pw.data_source = new_ds
+        self.pw._data_valid = self.pw._plot_valid = False
+        self.pw._recreate_frb()
+        self.pw._setup_plots()



https://bitbucket.org/yt_analysis/yt-3.0/changeset/37dc59a737a9/
changeset:   37dc59a737a9
branch:      yt
user:        MatthewTurk
date:        2012-11-02 21:16:24
summary:     This changes how TimeSeriesData works with PlotWindow objects.  Rather than the
.inc() method of the previous version, this now works by simple iteration:

http://paste.yt-project.org/show/2830/

The .piter() method is also available.

This also fixes what I believe is potentially a large bug that is rarely
encountered, where parallelism would sometimes look for the shape of a None
object.

GroupOwnership is now orphaned and, unless we find a use for it soon, will be deleted.
affected #:  2 files
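
In pattern form, the paste reduces to iterating over the plot window itself
(a sketch; the filenames follow the earlier example):

from yt.mods import *
ts = TimeSeriesData.from_filenames("enzo_tiny_cosmology/DD*/*.hierarchy")
sl = SlicePlot(ts, "x", "Density")
for window in sl:             # __iter__ switches the underlying dataset
    window.save()
for window in sl.piter():     # parallel variant
    window.save()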

diff -r 663fdee249754c2827db0a2a47748b6a9cc7acce -r 37dc59a737a99e4f27e6268c1672710af9b15eb6 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -581,7 +581,9 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = np.zeros(0, dtype=dtype) # This only works for
+                data = np.zeros(0, dtype=dtype) # This only works for
+            elif data is None:
+                data = np.zeros((ncols, 0), dtype=dtype)
             size = data.shape[-1]
             sizes = np.zeros(self.comm.size, dtype='int64')
             outsize = np.array(size, dtype='int64')
@@ -1067,14 +1069,15 @@
         self.owned = range(self.comm.size)
         self.pointer = 0
         if parallel_capable:
-            communication_system.push_with_ids(range(self.size))
+            communication_system.push_with_ids([self.comm.rank])
 
     def __del__(self):
         if parallel_capable:
             communication_system.pop()
 
-    def inc(self, n=1):
+    def inc(self, n = -1):
         old_item = self.item
+        if n == -1: n = self.comm.size
         for i in range(n):
             if self.pointer >= self.num_items - self.comm.size: break
             self.owned[self.pointer % self.comm.size] += self.comm.size
@@ -1082,8 +1085,9 @@
         if self.item is not old_item:
             self.switch()
             
-    def dec(self, n=1):
+    def dec(self, n = -1):
         old_item = self.item
+        if n == -1: n = self.comm.size
         for i in range(n):
             if self.pointer == 0: break
             self.owned[(self.pointer - 1) % self.comm.size] -= self.comm.size


diff -r 663fdee249754c2827db0a2a47748b6a9cc7acce -r 37dc59a737a99e4f27e6268c1672710af9b15eb6 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -262,8 +262,29 @@
         if not isinstance(ts, TimeSeriesData):
             if not iterable(ts): ts = [ts]
             ts = TimeSeriesData(ts)
-        self.controller = TimeSeriesPlotController(ts, self)
-        return self.controller.item
+        return ts
+
+    def __iter__(self):
+        for pf in self.ts:
+            mylog.warning("Switching to %s", pf)
+            self._switch_pf(pf)
+            yield self
+
+    def piter(self, *args, **kwargs):
+        for pf in self.ts.piter(*args, **kwargs):
+            self._switch_pf(pf)
+            yield self
+
+    def _switch_pf(self, new_pf):
+        ds = self.data_source
+        name = ds._type_name
+        kwargs = dict((n, getattr(ds, n)) for n in ds._con_args)
+        new_ds = getattr(new_pf.h, name)(**kwargs)
+        self.pf = new_pf
+        self.data_source = new_ds
+        self._data_valid = self._plot_valid = False
+        self._recreate_frb()
+        self._setup_plots()
 
     def __getitem__(self, item):
         return self.plots[item]
@@ -284,7 +305,6 @@
             self._frb._get_data_source_fields()
         else:
             for key in old_fields: self._frb[key]
-        self.pf = self._frb.pf
         self._data_valid = True
         
     def _setup_plots(self):
@@ -999,9 +1019,11 @@
         
         """
         # This will handle time series data and controllers
-        pf = self._initialize_dataset(pf) 
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1082,8 +1104,11 @@
         >>> p.save('sliceplot')
         
         """
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1505,19 +1530,3 @@
                                       norm = norm, vmin = self.zmin, 
                                       vmax = self.zmax, cmap = cmap)
         self.image.axes.ticklabel_format(scilimits=(-4,3))
-
-class TimeSeriesPlotController(GroupOwnership):
-    def __init__(self, ts, pw):
-        GroupOwnership.__init__(self, ts)
-        self.pw = pw
-
-    def switch(self):
-        ds = self.pw.data_source
-        new_pf = self.item
-        name = ds._type_name
-        kwargs = dict((n, getattr(ds, n)) for n in ds._con_args)
-        new_ds = getattr(new_pf.h, name)(**kwargs)
-        self.pw.data_source = new_ds
-        self.pw._data_valid = self.pw._plot_valid = False
-        self.pw._recreate_frb()
-        self.pw._setup_plots()



https://bitbucket.org/yt_analysis/yt-3.0/changeset/8e2150167715/
changeset:   8e2150167715
branch:      yt
user:        brittonsmith
date:        2012-11-02 22:01:14
summary:     Merged in MatthewTurk/yt (pull request #322)
affected #:  2 files

diff -r d1f7280c3c79607e859605e02e5a296c2acfc4b6 -r 8e2150167715b63ab413670576ea18a08e9f72bd yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -581,7 +581,9 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = np.zeros(0, dtype=dtype) # This only works for
+                data = np.zeros(0, dtype=dtype) # This only works for
+            elif data is None:
+                data = np.zeros((ncols, 0), dtype=dtype)
             size = data.shape[-1]
             sizes = np.zeros(self.comm.size, dtype='int64')
             outsize = np.array(size, dtype='int64')
@@ -1058,3 +1060,49 @@
                 nextdim = (nextdim + 1) % 3
         return cuts
     
+class GroupOwnership(ParallelAnalysisInterface):
+    def __init__(self, items):
+        ParallelAnalysisInterface.__init__(self)
+        self.num_items = len(items)
+        self.items = items
+        assert(self.num_items >= self.comm.size)
+        self.owned = range(self.comm.size)
+        self.pointer = 0
+        if parallel_capable:
+            communication_system.push_with_ids([self.comm.rank])
+
+    def __del__(self):
+        if parallel_capable:
+            communication_system.pop()
+
+    def inc(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer >= self.num_items - self.comm.size: break
+            self.owned[self.pointer % self.comm.size] += self.comm.size
+            self.pointer += 1
+        if self.item is not old_item:
+            self.switch()
+            
+    def dec(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer == 0: break
+            self.owned[(self.pointer - 1) % self.comm.size] -= self.comm.size
+            self.pointer -= 1
+        if self.item is not old_item:
+            self.switch()
+
+    _last = None
+    @property
+    def item(self):
+        own = self.owned[self.comm.rank]
+        if self._last != own:
+            self._item = self.items[own]
+            self._last = own
+        return self._item
+
+    def switch(self):
+        pass


diff -r d1f7280c3c79607e859605e02e5a296c2acfc4b6 -r 8e2150167715b63ab413670576ea18a08e9f72bd yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -60,6 +60,10 @@
     axis_labels
 from yt.utilities.math_utils import \
     ortho_find
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    GroupOwnership
+from yt.data_objects.time_series import \
+    TimeSeriesData
 
 def invalidate_data(f):
     @wraps(f)
@@ -254,6 +258,34 @@
             self.set_center(center)
         self._initfinished = True
 
+    def _initialize_dataset(self, ts):
+        if not isinstance(ts, TimeSeriesData):
+            if not iterable(ts): ts = [ts]
+            ts = TimeSeriesData(ts)
+        return ts
+
+    def __iter__(self):
+        for pf in self.ts:
+            mylog.warning("Switching to %s", pf)
+            self._switch_pf(pf)
+            yield self
+
+    def piter(self, *args, **kwargs):
+        for pf in self.ts.piter(*args, **kwargs):
+            self._switch_pf(pf)
+            yield self
+
+    def _switch_pf(self, new_pf):
+        ds = self.data_source
+        name = ds._type_name
+        kwargs = dict((n, getattr(ds, n)) for n in ds._con_args)
+        new_ds = getattr(new_pf.h, name)(**kwargs)
+        self.pf = new_pf
+        self.data_source = new_ds
+        self._data_valid = self._plot_valid = False
+        self._recreate_frb()
+        self._setup_plots()
+
     def __getitem__(self, item):
         return self.plots[item]
 
@@ -273,7 +305,6 @@
             self._frb._get_data_source_fields()
         else:
             for key in old_fields: self._frb[key]
-        self.pf = self._frb.pf
         self._data_valid = True
         
     def _setup_plots(self):
@@ -986,8 +1017,12 @@
         >>> p.save('sliceplot')
         
         """
+        # This will handle time series data and controllers
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1068,8 +1103,11 @@
         >>> p.save('sliceplot')
         
         """
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1491,4 +1529,3 @@
                                       norm = norm, vmin = self.zmin, 
                                       vmax = self.zmax, cmap = cmap)
         self.image.axes.ticklabel_format(scilimits=(-4,3))
-



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d77aaa8e74ca/
changeset:   d77aaa8e74ca
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-05 16:05:46
summary:     Merging from tip.
affected #:  19 files

diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -62,7 +62,7 @@
     _type_name = "streamline"
     _con_args = ('positions')
     sort_by = 't'
-    def __init__(self, positions, fields=None, pf=None, **kwargs):
+    def __init__(self, positions, length = 1.0, fields=None, pf=None, **kwargs):
         """
         This is a streamline, which is a set of points defined as
         being parallel to some vector field.
@@ -78,6 +78,8 @@
         ----------
         positions : array-like
             List of streamline positions
+        length : float
+            The magnitude of the distance; dts will be divided by this
         fields : list of strings, optional
             If you want the object to pre-retrieve a set of fields, supply them
             here.  This is not necessary.
@@ -91,11 +93,11 @@
         --------
 
         >>> from yt.visualization.api import Streamlines
-        >>> streamlines = Streamlines(pf, [0.5]*3)
+        >>> streamlines = Streamlines(pf, [0.5]*3) 
         >>> streamlines.integrate_through_volume()
         >>> stream = streamlines.path(0)
         >>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
-
+        
         """
         YTSelectionContainer1D.__init__(self, pf, fields, **kwargs)
         self.positions = positions
@@ -103,6 +105,8 @@
         self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
         self.dts[-1] = self.dts[-1]
+        self.length = length
+        self.dts /= length
         self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])
@@ -119,32 +123,31 @@
         p = np.all((min_streampoint <= RE) & (max_streampoint > LE), axis=1)
         self._grids = self.hierarchy.grids[p]
 
-    #@restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = np.logical_and(self._get_cut_mask(grid),
-                              grid.child_mask)
-        if field == 'dts': return self._dts[grid.id][mask]
-        if field == 't': return self._ts[grid.id][mask]
-        return grid[field][mask]
+        # No child masking here; it happens inside the mask cut
+        mask = self._get_cut_mask(grid) 
+        if field == 'dts': return self._dts[grid.id]
+        if field == 't': return self._ts[grid.id]
+        return grid[field].flat[mask]
+        
 
     def _get_cut_mask(self, grid):
-        mask = np.zeros(grid.ActiveDimensions, dtype='int')
-        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         #pdb.set_trace()
         points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
-                         np.all(self.positions <= grid.RightEdge, axis=1)
+                         np.all(self.positions <= grid.RightEdge, axis=1) 
         pids = np.where(points_in_grid)[0]
-        for i, pos in zip(pids, self.positions[points_in_grid]):
+        mask = np.zeros(points_in_grid.sum(), dtype='int')
+        dts = np.zeros(points_in_grid.sum(), dtype='float64')
+        ts = np.zeros(points_in_grid.sum(), dtype='float64')
+        for mi, (i, pos) in enumerate(zip(pids, self.positions[points_in_grid])):
             if not points_in_grid[i]: continue
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
+            if grid.child_mask[ci[0], ci[1], ci[2]] == 0: continue
             for j in range(3):
                 ci[j] = min(ci[j], grid.ActiveDimensions[j]-1)
-            if mask[ci[0], ci[1], ci[2]]:
-                continue
-            mask[ci[0], ci[1], ci[2]] = 1
-            dts[ci[0], ci[1], ci[2]] = self.dts[i]
-            ts[ci[0], ci[1], ci[2]] = self.ts[i]
+            mask[mi] = np.ravel_multi_index(ci, grid.ActiveDimensions)
+            dts[mi] = self.dts[i]
+            ts[mi] = self.ts[i]
         self._dts[grid.id] = dts
         self._ts[grid.id] = ts
         return mask
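
For reference, a minimal sketch (toy shapes, plain numpy rather than yt API) of
the flat-index bookkeeping the new mask uses:

    import numpy as np

    dims = (4, 4, 4)                        # stands in for grid.ActiveDimensions
    values = np.arange(64.0).reshape(dims)  # stands in for grid[field]
    ci = np.array([1, 2, 3])                # one cell index along the streamline
    flat = np.ravel_multi_index(ci, dims)   # 1*16 + 2*4 + 3 == 27
    assert values.flat[flat] == values[1, 2, 3]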


diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -53,7 +53,7 @@
 def force_array(item, shape):
     try:
         sh = item.shape
-        return item
+        return item.copy()
     except AttributeError:
         if item:
             return np.ones(shape, dtype='bool')
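
The .copy() above guards against aliasing; a minimal sketch of why, in plain
numpy:

    import numpy as np

    a = np.ones(3, dtype='bool')
    b = a                   # old behavior: hand back the caller's array itself
    b[0] = False
    assert a[0] == False    # in-place edits leak back into the caller's data
    c = a.copy()            # new behavior: a private copy
    c[1] = False
    assert a[1] == True     # the original stays intact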


diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/data_objects/tests/test_boolean_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -0,0 +1,353 @@
+from yt.testing import *
+from yt.data_objects.api import add_field
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    def _ID(field, data):
+        width = data.pf.domain_right_edge - data.pf.domain_left_edge
+        min_dx = 1.0/8192
+        delta = width / min_dx
+        x = data['x'] - min_dx / 2.
+        y = data['y'] - min_dx / 2.
+        z = data['z'] - min_dx / 2.
+        xi = x / min_dx
+        yi = y / min_dx
+        zi = z / min_dx
+        index = xi + delta[0] * (yi + delta[1] * zi)
+        index = index.astype('int64')
+        return index
+
+    add_field("ID", function=_ID)
+
+def test_boolean_spheres_no_overlap():
+    r"""Test to make sure that boolean objects (spheres, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping spheres. This also checks that the original spheres
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        sp1 = pf.h.sphere([0.25, 0.25, 0.25], 0.15)
+        sp2 = pf.h.sphere([0.75, 0.75, 0.75], 0.15)
+        # Store the original indices
+        i1 = sp1['ID']
+        i1.sort()
+        i2 = sp2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([sp1, "AND", sp2]) # empty
+        bo2 = pf.h.boolean([sp1, "NOT", sp2]) # only sp1
+        bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = sp1['ID']
+        new_i1.sort()
+        new_i2 = sp2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+ 
+def test_boolean_spheres_overlap():
+    r"""Test to make sure that boolean objects (spheres, overlap)
+    behave the way we expect.
+
+    Test overlapping spheres.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        sp1 = pf.h.sphere([0.45, 0.45, 0.45], 0.15)
+        sp2 = pf.h.sphere([0.55, 0.55, 0.55], 0.15)
+        # Get indices of both.
+        i1 = sp1['ID']
+        i2 = sp2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([sp1, "AND", sp2]) # overlap (a lens)
+        bo2 = pf.h.boolean([sp1, "NOT", sp2]) # sp1 - sp2 (sphere with bite)
+        bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination (H2)
+        # Now make sure the indices also behave as we expect.
+        lens = np.intersect1d(i1, i2)
+        apple = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, lens
+        yield assert_array_equal, b2, apple
+        yield assert_array_equal, b3, both
+
+def test_boolean_regions_no_overlap():
+    r"""Test to make sure that boolean objects (regions, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping regions. This also checks that the original regions
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re1 = pf.h.region([0.25]*3, [0.2]*3, [0.3]*3)
+        re2 = pf.h.region([0.65]*3, [0.6]*3, [0.7]*3)
+        # Store the original indices
+        i1 = re1['ID']
+        i1.sort()
+        i2 = re2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([re1, "AND", re2]) # empty
+        bo2 = pf.h.boolean([re1, "NOT", re2]) # only re1
+        bo3 = pf.h.boolean([re1, "OR", re2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = re1['ID']
+        new_i1.sort()
+        new_i2 = re2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1 
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_regions_overlap():
+    r"""Test to make sure that boolean objects (regions, overlap)
+    behave the way we expect.
+
+    Test overlapping regions.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re1 = pf.h.region([0.55]*3, [0.5]*3, [0.6]*3)
+        re2 = pf.h.region([0.6]*3, [0.55]*3, [0.65]*3)
+        # Get indices of both.
+        i1 = re1['ID']
+        i2 = re2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([re1, "AND", re2]) # overlap (small cube)
+        bo2 = pf.h.boolean([re1, "NOT", re2]) # re1 - re2 (large cube with bite)
+        bo3 = pf.h.boolean([re1, "OR", re2]) # combination (merged large cubes)
+        # Now make sure the indices also behave as we expect.
+        cube = np.intersect1d(i1, i2)
+        bite_cube = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, cube
+        yield assert_array_equal, b2, bite_cube
+        yield assert_array_equal, b3, both
+
+def test_boolean_cylinders_no_overlap():
+    r"""Test to make sure that boolean objects (cylinders, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping cylinders. This also checks that the original cylinders
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        cyl1 = pf.h.disk([0.25]*3, [1, 0, 0], 0.1, 0.1)
+        cyl2 = pf.h.disk([0.75]*3, [1, 0, 0], 0.1, 0.1)
+        # Store the original indices
+        i1 = cyl1['ID']
+        i1.sort()
+        i2 = cyl2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # empty
+        bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # only cyl1
+        bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = cyl1['ID']
+        new_i1.sort()
+        new_i2 = cyl2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_cylinders_overlap():
+    r"""Test to make sure that boolean objects (cylinders, overlap)
+    behave the way we expect.
+
+    Test overlapping cylinders.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        cyl1 = pf.h.disk([0.45]*3, [1, 0, 0], 0.2, 0.2)
+        cyl2 = pf.h.disk([0.55]*3, [1, 0, 0], 0.2, 0.2)
+        # Get indices of both.
+        i1 = cyl1['ID']
+        i2 = cyl2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # overlap (vertically extended lens)
+        bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # cyl1 - cyl2 (disk minus a bite)
+        bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination (merged disks)
+        # Now make sure the indices also behave as we expect.
+        vlens = np.intersect1d(i1, i2)
+        bite_disk = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, vlens
+        yield assert_array_equal, b2, bite_disk
+        yield assert_array_equal, b3, both
+
+def test_boolean_ellipsoids_no_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping ellipsoids. This also checks that the original
+    ellipsoids don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        ell1 = pf.h.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        ell2 = pf.h.ellipsoid([0.75]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        # Store the original indices
+        i1 = ell1['ID']
+        i1.sort()
+        i2 = ell2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([ell1, "AND", ell2]) # empty
+        bo2 = pf.h.boolean([ell1, "NOT", ell2]) # only ell1
+        bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = ell1['ID']
+        new_i1.sort()
+        new_i2 = ell2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1 
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_ellipsoids_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, overlap)
+    behave the way we expect.
+
+    Test overlapping ellipsoids.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        ell1 = pf.h.ellipsoid([0.45]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        ell2 = pf.h.ellipsoid([0.55]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        # Get indices of both.
+        i1 = ell1['ID']
+        i2 = ell2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([ell1, "AND", ell2]) # overlap
+        bo2 = pf.h.boolean([ell1, "NOT", ell2]) # ell1 - ell2
+        bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+        # Now make sure the indices also behave as we expect.
+        overlap = np.intersect1d(i1, i2)
+        diff = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, overlap
+        yield assert_array_equal, b2, diff
+        yield assert_array_equal, b3, both
+
+def test_boolean_mix_periodicity():
+    r"""Test that a hybrid boolean region behaves as we expect.
+
+    This also tests nested logic and that periodicity works.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re = pf.h.region([0.5]*3, [0.0]*3, [1]*3) # whole thing
+        sp = pf.h.sphere([0.95]*3, 0.3) # wraps around
+        cyl = pf.h.disk([0.05]*3, [1,1,1], 0.1, 0.4) # wraps around
+        # Get original indices
+        rei = re['ID']
+        spi = sp['ID']
+        cyli = cyl['ID']
+        # Make some booleans
+        # whole box minus spherical bites at corners
+        bo1 = pf.h.boolean([re, "NOT", sp])
+        # sphere plus cylinder
+        bo2 = pf.h.boolean([sp, "OR", cyl])
+        # a jumble, the region minus the sp+cyl
+        bo3 = pf.h.boolean([re, "NOT", "(", sp, "OR", cyl, ")"])
+        # Now make sure the indices also behave as we expect.
+        expect = np.setdiff1d(rei, spi)
+        ii = bo1['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+        #
+        expect = np.union1d(spi, cyli)
+        ii = bo2['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+        #
+        expect = np.union1d(spi, cyli)
+        expect = np.setdiff1d(rei, expect)
+        ii = bo3['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+
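
The semantics these tests assert, stated once in plain numpy set terms (toy
index arrays, not yt API):

    import numpy as np

    i1 = np.array([1, 2, 3, 4])
    i2 = np.array([3, 4, 5, 6])
    assert (np.intersect1d(i1, i2) == [3, 4]).all()              # "AND"
    assert (np.setdiff1d(i1, i2) == [1, 2]).all()                # "NOT"
    assert (np.union1d(i1, i2) == [1, 2, 3, 4, 5, 6]).all()      # "OR"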


diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/data_objects/tests/test_ellipsoid.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -0,0 +1,35 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","loglevel"] = "50"
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_ellipsoid():
+    # We decompose in different ways
+    cs = [np.array([0.5, 0.5, 0.5]),
+          np.array([0.1, 0.2, 0.3]),
+          np.array([0.8, 0.8, 0.8])]
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs)
+        min_dx = 2.0/pf.domain_dimensions
+        ABC = np.random.random((3, 12)) * 0.1
+        e0s = np.random.random((3, 12))
+        tilts = np.random.random(12)
+        ABC[:,0] = 0.1
+        for i in range(12):
+            for c in cs:
+                A, B, C = reversed(sorted(ABC[:,i]))
+                A = max(A, min_dx[0])
+                B = max(B, min_dx[1])
+                C = max(C, min_dx[2])
+                e0 = e0s[:,i]
+                tilt = tilts[i]
+                ell = pf.h.ellipsoid(c, A, B, C, e0, tilt)
+                yield assert_equal, np.all(ell["Radius"] <= A), True
+                p = np.array([ell[ax] for ax in 'xyz'])
+                v  = np.zeros_like(ell["Radius"])
+                v += (((p - c[:,None]) * ell._e0[:,None]).sum(axis=0) / ell._A)**2
+                v += (((p - c[:,None]) * ell._e1[:,None]).sum(axis=0) / ell._B)**2
+                v += (((p - c[:,None]) * ell._e2[:,None]).sum(axis=0) / ell._C)**2
+                yield assert_equal, np.all(np.sqrt(v) <= 1.0), True
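
The assertion above is the standard ellipsoid membership test; a minimal
standalone check, assuming axis-aligned e0, e1, e2 for simplicity:

    import numpy as np

    c = np.zeros(3)
    A, B, C = 0.3, 0.2, 0.1          # semi-axes, A >= B >= C
    e = np.eye(3)                    # e0, e1, e2 as unit vectors
    p = np.array([0.1, 0.05, 0.02])  # a point to test
    v = sum((np.dot(p - c, e[i]) / s)**2 for i, s in enumerate((A, B, C)))
    assert v <= 1.0                  # inside when the quadratic form is <= 1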


diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,6 +1,4 @@
 from yt.testing import *
-from yt.data_objects.profiles import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
 
 def setup():
     from yt.config import ytcfg
@@ -32,8 +30,30 @@
                 yield assert_equal, np.unique(proj["py"]), uc[yax]
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = proj.to_frb((1.0,'unitary'), 64)
+                for proj_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[proj_field].info['data_source'], \
+                            proj.__str__()
+                    yield assert_equal, frb[proj_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[proj_field].info['field'], \
+                            proj_field
+                    yield assert_equal, frb[proj_field].info['units'], \
+                            pf.field_info[proj_field].get_units()
+                    yield assert_equal, frb[proj_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[proj_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[proj_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[proj_field].info['center'], \
+                            proj.center
+                    yield assert_equal, frb[proj_field].info['weight_field'], \
+                            wf
             # wf == None
             yield assert_equal, wf, None
             v1 = proj["Density"].sum()
             v2 = (dd["Density"] * dd["d%s" % an]).sum()
             yield assert_rel_equal, v1, v2, 10
+
+


diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/data_objects/tests/test_slice.py
--- /dev/null
+++ b/yt/data_objects/tests/test_slice.py
@@ -0,0 +1,55 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_slice():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        xn, yn, zn = pf.domain_dimensions
+        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
+        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        uc = [np.unique(c) for c in coords]
+        slc_pos = 0.5
+        # Some simple slice tests with single grids
+        for ax, an in enumerate("xyz"):
+            xax = x_dict[ax]
+            yax = y_dict[ax]
+            for wf in ["Density", None]:
+                slc = pf.h.slice(ax, slc_pos, ["Ones", "Density"])
+                yield assert_equal, slc["Ones"].sum(), slc["Ones"].size
+                yield assert_equal, slc["Ones"].min(), 1.0
+                yield assert_equal, slc["Ones"].max(), 1.0
+                yield assert_equal, np.unique(slc["px"]), uc[xax]
+                yield assert_equal, np.unique(slc["py"]), uc[yax]
+                yield assert_equal, np.unique(slc["pdx"]), 1.0/(dims[xax]*2.0)
+                yield assert_equal, np.unique(slc["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = slc.to_frb((1.0,'unitary'), 64)
+                for slc_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[slc_field].info['data_source'], \
+                            slc.__str__()
+                    yield assert_equal, frb[slc_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[slc_field].info['field'], \
+                            slc_field
+                    yield assert_equal, frb[slc_field].info['units'], \
+                            pf.field_info[slc_field].get_units()
+                    yield assert_equal, frb[slc_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[slc_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[slc_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[slc_field].info['center'], \
+                            slc.center
+                    yield assert_equal, frb[slc_field].info['coord'], \
+                            slc_pos
+            # wf == None
+            yield assert_equal, wf, None
+
+


diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/data_objects/tests/test_streamlines.py
--- /dev/null
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -0,0 +1,22 @@
+from yt.testing import *
+from yt.visualization.api import Streamlines
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_streamlines():
+    # Seed streamlines on a small cube of starting points
+    cs = np.mgrid[0.47:0.53:2j,0.47:0.53:2j,0.47:0.53:2j]
+    cs = np.array([a.ravel() for a in cs]).T
+    length = (1.0/128) * 16 # 16 half-widths of a cell
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs, fields = _fields)
+        streams = Streamlines(pf, cs, length=length)
+        streams.integrate_through_volume()
+        for path in (streams.path(i) for i in range(8)):
+            yield assert_rel_equal, path['dts'].sum(), 1.0, 14
+            yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True
+            path["Density"]


diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -23,6 +23,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import numpy as np
+from yt.utilities.exceptions import *
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     NullFunc, \
@@ -36,8 +38,6 @@
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
     kboltz
-import numpy as np
-from yt.utilities.exceptions import *
 KnownFLASHFields = FieldInfoContainer()
 add_flash_field = KnownFLASHFields.add_field
 
@@ -100,8 +100,8 @@
         add_field(fn1.split("_")[0] + "_Density",
                   function=_get_density(fn1), take_log=True,
                   display_name="%s\/Density" % fn1.split("_")[0],
-                  units = r"\rm{g}/\rm{cm}^3",
-                  projected_units = r"\rm{g}/\rm{cm}^2",
+                  units = r"\rm{g}/\rm{cm}^{3}",
+                  projected_units = r"\rm{g}/\rm{cm}^{2}",
                   )
 
 def _get_convert(fname):
@@ -111,8 +111,8 @@
 
 add_flash_field("dens", function=NullFunc, take_log=True,
                 convert_function=_get_convert("dens"),
-                units=r"\rm{g}/\rm{cm}^3",
-                projected_units = r"\rm{g}/\rm{cm}^2"),
+                units=r"\rm{g}/\rm{cm}^{3}",
+                projected_units = r"\rm{g}/\rm{cm}^{2}"),
 add_flash_field("velx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("velx"),
                 units=r"\rm{cm}/\rm{s}")
@@ -158,12 +158,13 @@
 add_flash_field("tion", function=NullFunc, take_log=True,
                 units=r"\rm{K}")
 add_flash_field("tele", function=NullFunc, take_log=True,
+                convert_function=_get_convert("tele"),
                 units = r"\rm{K}")
 add_flash_field("trad", function=NullFunc, take_log=True,
                 units = r"\rm{K}")
 add_flash_field("pres", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pres"),
-                units=r"\rm{erg}\//\/\rm{cm}^{3}")
+                units=r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("pion", function=NullFunc, take_log=True,
                 display_name="Ion Pressure",
                 units=r"\rm{J}/\rm{cm}^3")
@@ -184,7 +185,7 @@
                 units=r"\rm{J}")
 add_flash_field("pden", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pden"),
-                units=r"\rm{g}/\rm{cm}^3")
+                units=r"\rm{g}/\rm{cm}^{3}")
 add_flash_field("depo", function=NullFunc, take_log=True,
                 units = r"\rm{ergs}/\rm{g}")
 add_flash_field("ye", function=NullFunc, take_log=True,
@@ -200,7 +201,7 @@
                 units = r"\mathrm{Gau\ss}")
 add_flash_field("magp", function=NullFunc, take_log=True,
                 convert_function=_get_convert("magp"),
-                units = r"\rm{erg}\//\/\rm{cm}^{3}")
+                units = r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("divb", function=NullFunc, take_log=False,
                 convert_function=_get_convert("divb"),
                 units = r"\mathrm{Gau\ss}\/\rm{cm}")
@@ -212,10 +213,10 @@
                 units=r"\rm{ratio\/of\/specific\/heats}")
 add_flash_field("gpot", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpot"),
-                units=r"\rm{ergs\//\/g}")
+                units=r"\rm{ergs}/\rm{g}")
 add_flash_field("gpol", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpol"),
-                units = r"\rm{ergs\//\/g}")
+                units = r"\rm{ergs}/\rm{g}")
 add_flash_field("flam", function=NullFunc, take_log=False,
                 convert_function=_get_convert("flam"))
 add_flash_field("absr", function=NullFunc, take_log=False,


diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -29,7 +29,8 @@
       StreamHierarchy, \
       StreamStaticOutput, \
       StreamHandler, \
-      load_uniform_grid
+      load_uniform_grid, \
+      load_amr_grids
 
 from .fields import \
       KnownStreamFields, \


diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -357,3 +357,103 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def load_amr_grids(grid_data, domain_dimensions, sim_unit_to_cm, bbox=None,
+                   sim_time=0.0, number_of_particles=0):
+    r"""Load a set of grids of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a sequence of grids of varying resolution of data to be
+    loaded directly into yt and analyzed as would any others.  This comes with
+    several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+        * No consistency checks are performed on the hierarchy.
+
+    Parameters
+    ----------
+    grid_data : list of dicts
+        This is a list of dicts.  Each dict must have entries "left_edge",
+        "right_edge", "dimensions", "level", and then any remaining entries are
+        assumed to be fields.  This will be modified in place and can't be
+        assumed to be static.
+    domain_dimensions : array_like
+        This is the domain dimensions of the grid
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of the computational domain in units of sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    number_of_particles : int, optional
+        If particle fields are included, set this to the number of particles
+
+    Examples
+    --------
+
+    >>> grid_data = [
+    ...     dict(left_edge = [0.0, 0.0, 0.0],
+    ...          right_edge = [1.0, 1.0, 1.],
+    ...          level = 0,
+    ...          dimensions = [32, 32, 32]),
+    ...     dict(left_edge = [0.25, 0.25, 0.25],
+    ...          right_edge = [0.75, 0.75, 0.75],
+    ...          level = 1,
+    ...          dimensions = [32, 32, 32])
+    ... ]
+    ... 
+    >>> for g in grid_data:
+    ...     g["Density"] = np.random.random(g["dimensions"]) * 2**g["level"]
+    ...
+    >>> pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)
+    """
+
+    domain_dimensions = np.array(domain_dimensions)
+    ngrids = len(grid_data)
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros((ngrids, 1), dtype='int32')
+    grid_left_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_right_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
+    sfh = StreamDictFieldHandler()
+    for i, g in enumerate(grid_data):
+        grid_left_edges[i,:] = g.pop("left_edge")
+        grid_right_edges[i,:] = g.pop("right_edge")
+        grid_dimensions[i,:] = g.pop("dimensions")
+        grid_levels[i,:] = g.pop("level")
+        sfh[i] = g
+
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        None, # parent_ids is none
+        number_of_particles*np.ones(ngrids, dtype='int64').reshape(ngrids,1),
+        np.zeros(ngrids).reshape((ngrids,1)),
+        sfh,
+    )
+
+    handler.name = "AMRGridData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+    return spf
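
A usage sketch of the optional bbox argument, with grid_data as in the
docstring example above and an assumed, illustrative conversion factor:

    import numpy as np

    # Domain spanning [-1, 1] in each dimension, in simulation units:
    bbox = np.array([[-1.0, 1.0], [-1.0, 1.0], [-1.0, 1.0]])
    pf = load_amr_grids(grid_data, [32, 32, 32], 3.08e24, bbox=bbox)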


diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import __builtin__
 import time, types, signal, inspect, traceback, sys, pdb, os
 import contextlib
 import warnings, struct, subprocess
@@ -310,7 +311,7 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       ytcfg.getboolean("yt", "ipython_notebook") or \
+       "__IPYTHON__" in dir(__builtin__) or \
        ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):
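
The replacement check relies on IPython (on Python 2) injecting __IPYTHON__
into __builtin__ at startup; in isolation:

    import __builtin__

    in_ipython = "__IPYTHON__" in dir(__builtin__)
    # True inside an interactive IPython session, False under plain python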




diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -188,3 +188,13 @@
     def __str__(self):
         return "There is no old answer available.\n" + \
                str(self.path)
+
+class YTEllipsoidOrdering(YTException):
+    def __init__(self, pf, A, B, C):
+        YTException.__init__(self, pf)
+        self._A = A
+        self._B = B
+        self._C = C
+
+    def __str__(self):
+        return "Must have A>=B>=C"


diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -578,7 +578,9 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = np.zeros(0, dtype=dtype) # This only works for
+                data = np.zeros(0, dtype=dtype) # This only works for
+            elif data is None:
+                data = np.zeros((ncols, 0), dtype=dtype)
             size = data.shape[-1]
             sizes = np.zeros(self.comm.size, dtype='int64')
             outsize = np.array(size, dtype='int64')
@@ -1055,3 +1057,49 @@
                 nextdim = (nextdim + 1) % 3
         return cuts
     
+class GroupOwnership(ParallelAnalysisInterface):
+    def __init__(self, items):
+        ParallelAnalysisInterface.__init__(self)
+        self.num_items = len(items)
+        self.items = items
+        assert(self.num_items >= self.comm.size)
+        self.owned = range(self.comm.size)
+        self.pointer = 0
+        if parallel_capable:
+            communication_system.push_with_ids([self.comm.rank])
+
+    def __del__(self):
+        if parallel_capable:
+            communication_system.pop()
+
+    def inc(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer >= self.num_items - self.comm.size: break
+            self.owned[self.pointer % self.comm.size] += self.comm.size
+            self.pointer += 1
+        if self.item is not old_item:
+            self.switch()
+            
+    def dec(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer == 0: break
+            self.owned[(self.pointer - 1) % self.comm.size] -= self.comm.size
+            self.pointer -= 1
+        if self.item is not old_item:
+            self.switch()
+
+    _last = None
+    @property
+    def item(self):
+        own = self.owned[self.comm.rank]
+        if self._last != own:
+            self._item = self.items[own]
+            self._last = own
+        return self._item
+
+    def switch(self):
+        pass
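
A serial sketch (no MPI, assumed sizes) of the round-robin bookkeeping inc()
performs: each rank starts owning one item, and advancing moves every rank
forward by the communicator size until the tail of the list is reached:

    size, num_items = 4, 10
    owned = list(range(size))        # rank r starts with item r
    pointer = 0
    for _ in range(size):            # one inc(n=size) step
        if pointer >= num_items - size:
            break
        owned[pointer % size] += size
        pointer += 1
    print(owned)                     # [4, 5, 6, 7]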


diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -29,6 +29,7 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
+from yt.data_objects.image_array import ImageArray
 from yt.utilities.lib.misc_utilities import \
     pixelize_cylinder
 import _MPL
@@ -135,8 +136,9 @@
                              self.bounds, int(self.antialias),
                              self._period, int(self.periodic),
                              ).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
     def __setitem__(self, item, val):
         self.data[item] = val
@@ -148,6 +150,28 @@
             if f not in exclude:
                 self[f]
 
+    def _get_info(self, item):
+        info = {}
+        info['data_source'] = self.data_source.__str__()  
+        info['axis'] = self.data_source.axis
+        info['field'] = str(item)
+        info['units'] = self.data_source.pf.field_info[item].get_units()
+        info['xlim'] = self.bounds[:2]
+        info['ylim'] = self.bounds[2:]
+        info['length_to_cm'] = self.data_source.pf['cm']
+        info['projected_units'] = \
+                self.data_source.pf.field_info[item].get_projected_units()
+        info['center'] = self.data_source.center
+        try:
+            info['coord'] = self.data_source.coord
+        except AttributeError:
+            pass
+        try:
+            info['weight_field'] = self.data_source.weight_field
+        except AttributeError:
+            pass
+        return info
+
     def convert_to_pixel(self, coords):
         r"""This function converts coordinates in code-space to pixel-space.
 

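With this change each field buffer comes back as an ImageArray carrying the
metadata assembled by _get_info; a usage sketch, assuming slc is a slice
object as in the tests earlier in this series:

    frb = slc.to_frb((1.0, 'unitary'), 64)
    img = frb['Density']             # now an ImageArray, not a bare ndarray
    print(img.info['axis'], img.info['units'], img.info['xlim'])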

diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -372,12 +372,10 @@
 class StreamlineCallback(PlotCallback):
     _type_name = "streamlines"
     def __init__(self, field_x, field_y, factor = 16,
-                 density = 1, arrowsize = 1, arrowstyle = None,
-                 color = None, normalize = False):
+                 density = 1, plot_args=None):
         """
-        annotate_streamlines(field_x, field_y, factor = 16, density = 1,
-                             arrowsize = 1, arrowstyle = None,
-                             color = None, normalize = False):
+        annotate_streamlines(field_x, field_y, factor = 16,
+                             density = 1, plot_args=None):
 
         Add streamlines to any plot, using the *field_x* and *field_y*
         from the associated data, skipping every *factor* datapoints like
@@ -389,12 +387,8 @@
         self.bv_x = self.bv_y = 0
         self.factor = factor
         self.dens = density
-        self.arrowsize = arrowsize
-        if arrowstyle is None : arrowstyle='-|>'
-        self.arrowstyle = arrowstyle
-        if color is None : color = "#000000"
-        self.color = color
-        self.normalize = normalize
+        if plot_args is None: plot_args = {}
+        self.plot_args = plot_args
         
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -418,15 +412,10 @@
                              plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
                            (x0, x1, y0, y1),).transpose()
-        X,Y = (na.linspace(xx0,xx1,nx,endpoint=True),
-                          na.linspace(yy0,yy1,ny,endpoint=True))
-        if self.normalize:
-            nn = na.sqrt(pixX**2 + pixY**2)
-            pixX /= nn
-            pixY /= nn
-        plot._axes.streamplot(X,Y, pixX, pixY, density=self.dens,
-                              arrowsize=self.arrowsize, arrowstyle=self.arrowstyle,
-                              color=self.color, norm=self.normalize)
+        X,Y = (np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
+        plot._axes.streamplot(X,Y, pixX, pixY, density = self.dens,
+                              **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
@@ -447,30 +436,6 @@
         plot._axes.set_xlabel(self.label)
         plot._axes.set_ylabel(self.label)
 
-class TimeCallback(PlotCallback):
-    _type_name = "time"
-    def __init__(self, format_code='10.7e'):
-        """
-        This annotates the plot with the current simulation time.
-        For now, the time is displayed in seconds.
-        *format_code* can be optionally set, allowing a custom 
-        c-style format code for the time display.
-        """
-        self.format_code = format_code
-        PlotCallback.__init__(self)
-    
-    def __call__(self, plot):
-        current_time = plot.pf.current_time/plot.pf['Time']
-        timestring = format(current_time,self.format_code)
-        base = timestring[:timestring.find('e')]
-        exponent = timestring[timestring.find('e')+1:]
-        if exponent[0] == '+':
-            exponent = exponent[1:]
-        timestring = r'$t\/=\/'+base+''+r'\times\,10^{'+exponent+r'}\, \rm{s}$'
-        from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
-        at = AnchoredText(timestring, prop=dict(size=12), frameon=True, loc=4)
-        plot._axes.add_artist(at)
-
 def get_smallest_appropriate_unit(v, pf):
     max_nu = 1e30
     good_u = None
@@ -641,8 +606,8 @@
                                plot.data[self.field_y],
                                int(nx), int(ny),
                                (x0, x1, y0, y1),).transpose()
-        X = np.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
-        Y = np.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
+        X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
         plot._axes.quiver(X,Y, pixX, pixY)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
@@ -764,13 +729,18 @@
         self.plot_args = plot_args
 
     def __call__(self, plot):
-        if len(self.pos) == 3:
+        xx0, xx1 = plot._axes.get_xlim()
+        yy0, yy1 = plot._axes.get_ylim()
+        if np.array(self.pos).shape == (3,):
             pos = (self.pos[x_dict[plot.data.axis]],
                    self.pos[y_dict[plot.data.axis]])
-        else: pos = self.pos
+        elif np.array(self.pos).shape == (2,):
+            pos = self.pos
         x,y = self.convert_to_plot(plot, pos)
         plot._axes.hold(True)
-        plot._axes.plot((x,),(y,),self.marker, **self.plot_args)
+        plot._axes.scatter(x,y, marker = self.marker, **self.plot_args)
+        plot._axes.set_xlim(xx0,xx1)
+        plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
 
 class SphereCallback(PlotCallback):
@@ -1110,7 +1080,6 @@
     def __call__(self,plot):
         plot._axes.set_title(self.title)
 
-
 class FlashRayDataCallback(PlotCallback):
     _type_name = "flash_ray_data"
     def __init__(self, cmap_name='bone', sample=None):
@@ -1196,8 +1165,10 @@
           'kev': 1e-12 * 7.6e-8 / 6.03,
           'mev': 1e-15 * 7.6e-8 / 6.03,
           }
+    _bbox_dict = {'boxstyle': 'square,pad=0.6', 'fc': 'white', 'ec': 'black', 'alpha': 1.0}
 
-    def __init__(self, x, y, units=None, format="{time:.3G} {units}", **kwargs):
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized = False, 
+                 bbox_dict = None, **kwargs):
         """ 
         annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
 
@@ -1205,32 +1176,47 @@
         is given ('s', 'ms', 'ns', etc), it will convert the time to this basis.  If 
         *units* is None, it will attempt to figure out the correct value by which to 
         scale.  The *format* keyword is a template string that will be evaluated and 
-        displayed on the plot.  All other *kwargs* will be passed to the text() 
-        method on the plot axes.  See matplotlib's text() functions for more 
-        information.
+        displayed on the plot.  If *normalized* is true, *x* and *y* are interpreted 
+        as normalized plot coordinates (0,0 is lower-left and 1,1 is upper-right); 
+        otherwise *x* and *y* are assumed to be in plot coordinates. The *bbox_dict* 
+        is an optional dict of arguments for the bbox that frames the timestamp, see 
+        matplotlib's text annotation guide for more details. All other *kwargs* will 
+        be passed to the text() method on the plot axes.  See matplotlib's text() 
+        functions for more information.
         """
         self.x = x
         self.y = y
         self.format = format
         self.units = units
+        self.normalized = normalized
+        if bbox_dict is not None:
+            self.bbox_dict = bbox_dict
+        else:
+            self.bbox_dict = self._bbox_dict
         self.kwargs = {'color': 'w'}
         self.kwargs.update(kwargs)
 
     def __call__(self, plot):
         if self.units is None:
-            t = plot.data.pf.current_time
+            t = plot.data.pf.current_time * plot.data.pf['Time']
             scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']
             self.units = 's'
             for k in scale_keys:
                 if t < self._time_conv[k]:
                     break
                 self.units = k
-        t = plot.data.pf.current_time / self._time_conv[self.units.lower()]
+        t = plot.data.pf.current_time * plot.data.pf['Time'] 
+        t /= self._time_conv[self.units.lower()]
         if self.units == 'us':
             self.units = '$\\mu s$'
         s = self.format.format(time=t, units=self.units)
         plot._axes.hold(True)
-        plot._axes.text(self.x, self.y, s, **self.kwargs)
+        if self.normalized:
+            plot._axes.text(self.x, self.y, s, horizontalalignment='center',
+                            verticalalignment='center', 
+                            transform = plot._axes.transAxes, bbox=self.bbox_dict)
+        else:
+            plot._axes.text(self.x, self.y, s, bbox=self.bbox_dict, **self.kwargs)
         plot._axes.hold(False)
 
 
@@ -1259,3 +1245,4 @@
 
     def __call__(self, plot):
         super(MaterialBoundaryCallback, self).__call__(plot)
+
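
A usage sketch of the new timestamp keywords, assuming p is an existing plot
window: normalized coordinates pin the stamp relative to the frame, and
bbox_dict restyles the box around it:

    p.annotate_timestamp(0.5, 0.95, normalized=True, units='ns',
                         bbox_dict={'boxstyle': 'round', 'fc': 'gray'})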


diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -60,6 +60,10 @@
     axis_labels
 from yt.utilities.math_utils import \
     ortho_find
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    GroupOwnership
+from yt.data_objects.time_series import \
+    TimeSeriesData
 
 def invalidate_data(f):
     @wraps(f)
@@ -254,6 +258,34 @@
             self.set_center(center)
         self._initfinished = True
 
+    def _initialize_dataset(self, ts):
+        if not isinstance(ts, TimeSeriesData):
+            if not iterable(ts): ts = [ts]
+            ts = TimeSeriesData(ts)
+        return ts
+
+    def __iter__(self):
+        for pf in self.ts:
+            mylog.warning("Switching to %s", pf)
+            self._switch_pf(pf)
+            yield self
+
+    def piter(self, *args, **kwargs):
+        for pf in self.ts.piter(*args, **kwargs):
+            self._switch_pf(pf)
+            yield self
+
+    def _switch_pf(self, new_pf):
+        ds = self.data_source
+        name = ds._type_name
+        kwargs = dict((n, getattr(ds, n)) for n in ds._con_args)
+        new_ds = getattr(new_pf.h, name)(**kwargs)
+        self.pf = new_pf
+        self.data_source = new_ds
+        self._data_valid = self._plot_valid = False
+        self._recreate_frb()
+        self._setup_plots()
+
     def __getitem__(self, item):
         return self.plots[item]
 
@@ -273,7 +305,6 @@
             self._frb._get_data_source_fields()
         else:
             for key in old_fields: self._frb[key]
-        self.pf = self._frb.pf
         self._data_valid = True
         
     def _setup_plots(self):
@@ -1063,8 +1094,12 @@
         >>> p.save('sliceplot')
         
         """
+        # This will handle time series data and controllers
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis])
         slc.get_data(fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
@@ -1167,8 +1202,11 @@
         >>> p.save('sliceplot')
         
         """
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         proj = pf.h.proj(fields, axis, weight_field=weight_field, center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1332,6 +1370,7 @@
     _ext_widget_id = None
     _current_field = None
     _widget_name = "plot_window"
+    _frb_generator = FixedResolutionBuffer
 
     def _setup_plots(self):
         from yt.gui.reason.bottle_mods import PayloadHandler
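
A sketch of the new time-series behavior, assuming pf1 and pf2 are
already-loaded parameter files: the constructor wraps its input in a
TimeSeriesData, and iterating the plot switches datasets via _switch_pf:

    p = SlicePlot(pf1, 'x', 'Density')        # a single pf works as before
    p = SlicePlot([pf1, pf2], 'x', 'Density')
    for frame in p:                           # __iter__ re-slices each pf
        frame.save()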


diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -118,7 +118,9 @@
         if length is None:
             length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
-        self.steps = int(length/dx)
+        self.steps = int(length/dx)+1
+        # Fix up the dx.
+        self.dx = 1.0*self.length/self.steps
         self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:
@@ -207,4 +209,5 @@
         
         """
         return YTStreamlineBase(self.streamlines[streamline_id], pf=self.pf)
+                                    length = self.length)
         


diff -r 283b8f4bced60b3c7b957a6ad5ea0057ca7fd082 -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1448,7 +1448,7 @@
             yield self.snapshot()
 
 def allsky_projection(pf, center, radius, nside, field, weight = None,
-                      inner_radius = 10, rotation = None):
+                      inner_radius = 10, rotation = None, source = None):
     r"""Project through a parameter file, through an allsky-method
     decomposition from HEALpix, and return the image plane.
 
@@ -1483,6 +1483,9 @@
         If supplied, the vectors will be rotated by this.  You can construct
         this by, for instance, calling np.array([v1,v2,v3]) where those are the
         three reference planes of an orthogonal frame (see ortho_find).
+    source : data container, default None
+        If supplied, the data source from which the all-sky projection
+        pulls its data.
 
     Returns
     -------
@@ -1526,12 +1529,20 @@
     positions += inner_radius * dx * vs
     vs *= radius
     uv = np.ones(3, dtype='float64')
-    grids = pf.h.sphere(center, radius)._grids
+    if source is not None:
+        grids = source._grids
+    else:
+        grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
                                 image, uv, uv, np.zeros(3, dtype='float64'))
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
-        data = [grid[field] * grid.child_mask.astype('float64')
+        if source is not None:
+            data = [grid[field] * source._get_cut_mask(grid) * \
+                grid.child_mask.astype('float64')
+                for field in fields]
+        else:
+            data = [grid[field] * grid.child_mask.astype('float64')
                 for field in fields]
         pg = PartitionedGrid(
             grid.id, data,
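
A usage sketch of the new source keyword (assumed center and radius),
restricting the projection to a single data container:

    sp = pf.h.sphere([0.5, 0.5, 0.5], 0.25)
    image = allsky_projection(pf, [0.5, 0.5, 0.5], 0.25, 64, "Density",
                              source=sp)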



https://bitbucket.org/yt_analysis/yt-3.0/changeset/1c409706b361/
changeset:   1c409706b361
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-05 16:26:35
summary:     Fixing a few problems with the merge.  Many of the tests pass; however, a few
objects need to be updated.
affected #:  3 files

diff -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 -r 1c409706b3611bc85877f659749156c10cbcae6e yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -666,7 +666,7 @@
         as the points in `this` data object with the given *indices*.
         """
         fp = self.field_parameters.copy()
-        return YTSelectedIndicesBase(self, indices, **fp)
+        return YTSelectedIndicesBase(self, indices, field_parameters = fp)
 
     def extract_isocontours(self, field, value, filename = None,
                             rescale = False, sample_values = None):


diff -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 -r 1c409706b3611bc85877f659749156c10cbcae6e yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -950,13 +950,14 @@
     _type_name = "ellipsoid"
     _con_args = ('center', '_A', '_B', '_C', '_e0', '_tilt')
     def __init__(self, center, A, B, C, e0, tilt, fields=None,
-                 pf=None, **kwargs):
+                 pf=None, field_parameters = None):
         """
         By providing a *center*,*A*,*B*,*C*,*e0*,*tilt* we
         can define an ellipsoid of any proportion.  Only cells whose centers are
         within the ellipsoid will be selected.
         """
-        AMR3DData.__init__(self, np.array(center), fields, pf, **kwargs)
+        YTSelectionContainer3D.__init__(self, np.array(center), pf,
+                                        field_parameters)
         # make sure the smallest side is not smaller than dx
         if C < self.hierarchy.get_smallest_dx():
             raise YTSphereTooSmall(pf, C, self.hierarchy.get_smallest_dx())


diff -r d77aaa8e74cab19a6d8ba151e495d4b10815f055 -r 1c409706b3611bc85877f659749156c10cbcae6e yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -208,6 +208,6 @@
         >>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
         
         """
-        return YTStreamlineBase(self.streamlines[streamline_id], pf=self.pf)
+        return YTStreamlineBase(self.streamlines[streamline_id], pf=self.pf,
                                     length = self.length)
         



https://bitbucket.org/yt_analysis/yt-3.0/changeset/6017c631ebe5/
changeset:   6017c631ebe5
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-05 16:29:47
summary:     Minor fix to answer testing
affected #:  1 file

diff -r 1c409706b3611bc85877f659749156c10cbcae6e -r 6017c631ebe5a8e6edab077813301e3052d8ae53 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -148,6 +148,7 @@
 
 class AnswerTestingTest(object):
     reference_storage = None
+    result_storage = None
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/40948b564a6f/
changeset:   40948b564a6f
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-05 17:53:23
summary:     Fixing Ellipsoidal data containers.
affected #:  2 files

diff -r 6017c631ebe5a8e6edab077813301e3052d8ae53 -r 40948b564a6f86151cb23fb1982e51faf16da429 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -953,18 +953,45 @@
                  pf=None, field_parameters = None):
         """
         By providing a *center*,*A*,*B*,*C*,*e0*,*tilt* we
-        can define a ellipsoid of any proportion.  Only cells whose centers are
-        within the ellipsoid will be selected.
+        can define an ellipsoid of any proportion.  Only cells whose
+        centers are within the ellipsoid will be selected.
+
+        Parameters
+        ----------
+        center : array_like
+            The center of the ellipsoid.
+        A : float
+            The magnitude of the largest semi-major axis of the ellipsoid.
+        B : float
+            The magnitude of the medium semi-major axis of the ellipsoid.
+        C : float
+            The magnitude of the smallest semi-major axis of the ellipsoid.
+        e0 : array_like (automatically normalized)
+            The direction of the largest semi-major axis of the ellipsoid.
+        tilt : float
+            After the rotation about the z-axis to align e0 to x in the x-y
+            plane, and then rotating about the y-axis to align e0 completely
+            to the x-axis, tilt is the angle in radians remaining to
+            rotate about the x-axis to align both e1 to the y-axis and e2 to
+            the z-axis.
+
+        Examples
+        --------
+        >>> pf = load("DD####/DD####")
+        >>> c = [0.5,0.5,0.5]
+        >>> ell = pf.h.ellipsoid(c, 0.1, 0.1, 0.1, np.array([0.1, 0.1, 0.1]), 0.2)
         """
         YTSelectionContainer3D.__init__(self, np.array(center), pf,
                                         field_parameters)
+        # make sure the magnitudes of semi-major axes are in order
+        if A<B or B<C:
+            raise YTEllipsoidOrdering(pf, A, B, C)
         # make sure the smallest side is not smaller than dx
         if C < self.hierarchy.get_smallest_dx():
             raise YTSphereTooSmall(pf, C, self.hierarchy.get_smallest_dx())
         self._A = A
         self._B = B
         self._C = C
-        self._e0 = e0
+        self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
         self._tilt = tilt
         
         # find the t1 angle needed to rotate about z axis to align e0 to x
@@ -996,85 +1023,3 @@
         self.set_field_parameter('e0', e0)
         self.set_field_parameter('e1', e1)
         self.set_field_parameter('e2', e2)
-        self.DW = self.pf.domain_right_edge - self.pf.domain_left_edge
-        self._refresh_data()
-
-        """
-        Having another function find_ellipsoid_grids is too much work, 
-        can just use the sphere one and forget about checking orientation
-        but feed in the A parameter for radius
-        """
-    def _get_list_of_grids(self, field = None):
-        """
-        This returns the grids that are possibly within the ellipse
-        """
-        grids,ind = self.hierarchy.find_sphere_grids(self.center, self._A)
-        # Now we sort by level
-        grids = grids.tolist()
-        grids.sort(key=lambda x: (x.Level, \
-                                  x.LeftEdge[0], \
-                                  x.LeftEdge[1], \
-                                  x.LeftEdge[2]))
-        self._grids = np.array(grids, dtype = 'object')
-
-    def _is_fully_enclosed(self, grid):
-        """
-        check if all grid corners are inside the ellipsoid
-        """
-        # vector from corner to center
-        vr = (grid._corners - self.center)
-        # 3 possible cases of locations taking periodic BC into account
-        # just listing the components, find smallest later
-        dotarr=np.array([vr, vr + self.DW, vr - self.DW])
-        # these vrdote# finds the product of vr components with e#
-        # square the results
-        # find the smallest
-        # sums it
-        vrdote0_2 = (np.multiply(dotarr, self._e0)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        vrdote1_2 = (np.multiply(dotarr, self._e1)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        vrdote2_2 = (np.multiply(dotarr, self._e2)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        return np.all(vrdote0_2 / self._A**2 + \
-                      vrdote1_2 / self._B**2 + \
-                      vrdote2_2 / self._C**2 <=1.0)
-
-    def _get_cut_mask(self, grid, field = None):
-        """
-        This checks if each cell is inside the ellipsoid
-        """
-        # We have the *property* center, which is not necessarily
-        # the same as the field_parameter
-        if self._is_fully_enclosed(grid):
-            return True # We do not want child masking here
-        if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)) \
-           and grid.id in self._cut_masks:
-            return self._cut_masks[grid.id]
-        Inside = np.zeros(grid["x"].shape, dtype = 'float64')
-        dim = grid["x"].shape
-        # need this to take into account non-cube root grid tiles
-        dot_evec = np.zeros([3, dim[0], dim[1], dim[2]])
-        for i, ax in enumerate('xyz'):
-            # distance to center
-            ar  = grid[ax]-self.center[i]
-            # cases to take into account periodic BC
-            case = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
-            # find which of the 3 cases is smallest in magnitude
-            index = np.abs(case).argmin(axis = 0)
-            # restrict distance to only the smallest cases
-            vec = np.choose(index, case)
-            # sum up to get the dot product with e_vectors
-            dot_evec += np.array([vec * self._e0[i], \
-                                  vec * self._e1[i], \
-                                  vec * self._e2[i]])
-        # Calculate the eqn of ellipsoid, if it is inside
-        # then result should be <= 1.0
-        Inside = dot_evec[0]**2 / self._A**2 + \
-                 dot_evec[1]**2 / self._B**2 + \
-                 dot_evec[2]**2 / self._C**2
-        cm = ((Inside <= 1.0) & grid.child_mask)
-        if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)):
-            self._cut_masks[grid.id] = cm
-        return cm
-


diff -r 6017c631ebe5a8e6edab077813301e3052d8ae53 -r 40948b564a6f86151cb23fb1982e51faf16da429 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -880,3 +880,66 @@
 
 data_collection_selector = DataCollectionSelector
 
+cdef class EllipsoidSelector(SelectorObject):
+    cdef np.float64_t vec[3][3]
+    cdef np.float64_t mag[3]
+    cdef np.float64_t center[3]
+
+    def __init__(self, dobj):
+        cdef int i
+        for i in range(3):
+            self.center[i] = dobj.center[i]
+            self.vec[0][i] = dobj._e0[i]
+            self.vec[1][i] = dobj._e1[i]
+            self.vec[2][i] = dobj._e2[i]
+        self.mag[0] = dobj._A
+        self.mag[1] = dobj._B
+        self.mag[2] = dobj._C
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_grid(self, np.float64_t left_edge[3],
+                               np.float64_t right_edge[3]) nogil:
+        # NOTE: the early return below disables this coarse bounding-sphere
+        # check; every grid is selected and cells are culled in select_cell.
+        cdef np.float64_t radius2, box_center, relcenter, closest, dist, edge
+        return 1
+        radius2 = self.mag[0] * self.mag[0]
+        cdef int i
+        if (left_edge[0] <= self.center[0] <= right_edge[0] and
+            left_edge[1] <= self.center[1] <= right_edge[1] and
+            left_edge[2] <= self.center[2] <= right_edge[2]):
+            return 1
+        # http://www.gamedev.net/topic/335465-is-this-the-simplest-sphere-aabb-collision-test/
+        dist = 0
+        for i in range(3):
+            box_center = (right_edge[i] + left_edge[i])/2.0
+            relcenter = self.center[i] - box_center
+            edge = right_edge[i] - left_edge[i]
+            closest = relcenter - fclip(relcenter, -edge/2.0, edge/2.0)
+            dist += closest * closest
+        if dist < radius2: return 1
+        return 0
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3],
+                         int eterm[3]) nogil:
+        cdef np.float64_t dot_evec[3]
+        cdef np.float64_t dist
+        cdef int i, j
+        dot_evec[0] = dot_evec[1] = dot_evec[2] = 0
+        # Calculate the rotated dot product
+        for i in range(3): # axis
+            dist = pos[i] - self.center[i]
+            for j in range(3):
+                dot_evec[j] += dist * self.vec[j][i]
+        dist = 0.0
+        for i in range(3):
+            dist += (dot_evec[i] * dot_evec[i])/(self.mag[i] * self.mag[i])
+        if dist <= 1.0: return 1
+        return 0
+
+ellipsoid_selector = EllipsoidSelector
+
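
The membership test in select_cell reduces to the standard ellipsoid
equation evaluated in the rotated frame.  A minimal NumPy sketch of the
same check (the function name and argument layout are illustrative only,
not part of the yt API):

    import numpy as np

    def inside_ellipsoid(pos, center, evecs, mags):
        # pos, center: length-3 sequences; evecs: (3, 3) array whose rows
        # are the orthonormal principal axes e0, e1, e2; mags: (A, B, C).
        rel = np.asarray(pos) - np.asarray(center)
        # Project the offset onto each principal axis ...
        dot_evec = np.dot(np.asarray(evecs), rel)
        # ... then apply sum_j (rel . e_j)**2 / mag_j**2 <= 1.
        return ((dot_evec / np.asarray(mags)) ** 2).sum() <= 1.0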



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5f944ce3a26a/
changeset:   5f944ce3a26a
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-05 17:58:18
summary:     Fixing up some slice tests
affected #:  1 file

diff -r 40948b564a6f86151cb23fb1982e51faf16da429 -r 5f944ce3a26abe6eee0d100b6c0c5df5fb50a38c yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -21,7 +21,7 @@
             xax = x_dict[ax]
             yax = y_dict[ax]
             for wf in ["Density", None]:
-                slc = pf.h.slice(ax, slc_pos, ["Ones", "Density"])
+                slc = pf.h.slice(ax, slc_pos)
                 yield assert_equal, slc["Ones"].sum(), slc["Ones"].size
                 yield assert_equal, slc["Ones"].min(), 1.0
                 yield assert_equal, slc["Ones"].max(), 1.0



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5882b7d5dd18/
changeset:   5882b7d5dd18
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-05 19:21:22
summary:     Re-enabling a bunch of fields to test.  This increases both test count and
failure count.
affected #:  1 file

diff -r 5f944ce3a26abe6eee0d100b6c0c5df5fb50a38c -r 5882b7d5dd18d46b0b0a8b355bad41521e5c29d9 yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -86,9 +86,10 @@
         if field.startswith("particle"): continue
         if field.startswith("CIC"): continue
         if field.startswith("WeakLensingConvergence"): continue
-        if any(hasattr(v, "ghost_zones")
-               for v in FieldInfo[field].validators):
-            continue
+        skip = False
+        for v in FieldInfo[field].validators:
+            if getattr(v, "ghost_zones", 0) > 0: skip = True
+        if skip: continue
         if FieldInfo[field].particle_type: continue
         for nproc in [1, 4, 8]:
             yield TestFieldAccess(field, nproc)
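
A note on the hasattr -> getattr switch above: ValidateSpatial instances
carry a ghost_zones attribute even when it is zero, so the old test
skipped every spatially-validated field.  A small illustration (the
validator instances here are hypothetical):

    from yt.data_objects.field_info_container import ValidateSpatial

    v0 = ValidateSpatial(0)  # spatial validation, zero ghost zones
    v1 = ValidateSpatial(1)  # requires one layer of ghost zones

    assert hasattr(v0, "ghost_zones")          # old test: field skipped
    assert getattr(v0, "ghost_zones", 0) == 0  # new test: field kept
    assert getattr(v1, "ghost_zones", 0) > 0   # new test: field skipped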



https://bitbucket.org/yt_analysis/yt-3.0/changeset/7fc56f31b20f/
changeset:   7fc56f31b20f
branch:      yt
user:        MatthewTurk
date:        2012-11-05 20:05:22
summary:     Three major changes:

 * Fixing an issue with answer testing.
 * Adding tests for smoothed covering grids.
 * Fixing an issue that arose with smoothed covering grids, wherein generated
   fields were never being created in the smoothed covering grid but were
   always interpolated down.  This resulted in potentially incorrect masses
   and volumes in smoothed covering grids when crossing level boundaries.  I
   think this is a very uncommon use case.
affected #:  3 files

diff -r 8e2150167715b63ab413670576ea18a08e9f72bd -r 7fc56f31b20f6520605b33754047758f9a5baccd yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3887,10 +3887,21 @@
         self._get_list_of_grids()
         # We don't generate coordinates here.
+        if field is None:
-            fields_to_get = self.fields[:]
+            fields = self.fields[:]
         else:
-            fields_to_get = ensure_list(field)
-        fields_to_get = [f for f in fields_to_get if f not in self.field_data]
+            fields = ensure_list(field)
+        fields_to_get = []
+        for field in fields:
+            if self.field_data.has_key(field): continue
+            if field not in self.hierarchy.field_list:
+                try:
+                    #print "Generating", field
+                    self._generate_field(field)
+                    continue
+                except NeedsOriginalGrid, ngt_exception:
+                    pass
+            fields_to_get.append(field)
+        if len(fields_to_get) == 0: return
         # Note that, thanks to some trickery, we have different dimensions
         # on the field than one might think from looking at the dx and the
         # L/R edges.


diff -r 8e2150167715b63ab413670576ea18a08e9f72bd -r 7fc56f31b20f6520605b33754047758f9a5baccd yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -25,3 +25,24 @@
                                       dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
                                       dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
                     yield assert_equal, f, g["Density"]
+
+def test_smoothed_covering_grid():
+    # We decompose in different ways
+    for level in [0, 1, 2]:
+        for nprocs in [1, 2, 4, 8]:
+            pf = fake_random_pf(16, nprocs = nprocs)
+            dn = pf.refine_by**level 
+            cg = pf.h.smoothed_covering_grid(level, [0.0, 0.0, 0.0],
+                    dn * pf.domain_dimensions)
+            assert_equal( cg["Ones"].max(), 1.0)
+            assert_equal( cg["Ones"].min(), 1.0)
+            assert_equal( cg["CellVolume"].sum(), pf.domain_width.prod())
+            for g in pf.h.grids:
+                if level != g.Level: continue
+                di = g.get_global_startindex()
+                dd = g.ActiveDimensions
+                for i in range(dn):
+                    f = cg["Density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
+                                      dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
+                                      dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
+                    yield assert_equal, f, g["Density"]


diff -r 8e2150167715b63ab413670576ea18a08e9f72bd -r 7fc56f31b20f6520605b33754047758f9a5baccd yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -148,6 +148,7 @@
 
 class AnswerTestingTest(object):
     reference_storage = None
+    result_storage = None
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
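
A short usage sketch tying the fix to the new test above, assuming the
helpers come from yt.testing as elsewhere in the test suite (the level
and nprocs values are illustrative):

    from yt.testing import fake_random_pf, assert_equal

    pf = fake_random_pf(16, nprocs = 4)
    level = 2
    dims = pf.domain_dimensions * pf.refine_by**level
    scg = pf.h.smoothed_covering_grid(level, [0.0, 0.0, 0.0], dims)
    # With the fix, generated fields are created natively on the smoothed
    # grid rather than interpolated down, so volume integrals close.
    assert_equal(scg["CellVolume"].sum(), pf.domain_width.prod())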
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d7d2590b8a66/
changeset:   d7d2590b8a66
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-05 20:17:08
summary:     Adding sorting keywords to spatial chunking
affected #:  2 files

diff -r 5882b7d5dd18d46b0b0a8b355bad41521e5c29d9 -r d7d2590b8a66c5c6fba756902c3ee3952eb4edd9 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -358,18 +358,18 @@
         return fields_to_return, fields_to_generate
 
 
-    def _chunk(self, dobj, chunking_style, ngz = 0):
+    def _chunk(self, dobj, chunking_style, ngz = 0, **kwargs):
         # A chunk is either None or (grids, size)
         if dobj._current_chunk is None:
             self._identify_base_chunk(dobj)
         if ngz != 0 and chunking_style != "spatial":
             raise NotImplementedError
         if chunking_style == "all":
-            return self._chunk_all(dobj)
+            return self._chunk_all(dobj, **kwargs)
         elif chunking_style == "spatial":
-            return self._chunk_spatial(dobj, ngz)
+            return self._chunk_spatial(dobj, ngz, **kwargs)
         elif chunking_style == "io":
-            return self._chunk_io(dobj)
+            return self._chunk_io(dobj, **kwargs)
         else:
             raise NotImplementedError
 


diff -r 5882b7d5dd18d46b0b0a8b355bad41521e5c29d9 -r d7d2590b8a66c5c6fba756902c3ee3952eb4edd9 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -209,9 +209,15 @@
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         yield YTDataChunk(dobj, "all", gobjs, dobj.size)
         
-    def _chunk_spatial(self, dobj, ngz):
+    def _chunk_spatial(self, dobj, ngz, sort = None):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        for i,og in enumerate(gobjs):
+        if sort in ("+level", "level"):
+            giter = sorted(gobjs, key = lambda g: g.Level)
+        elif sort == "-level":
+            giter = sorted(gobjs, key = lambda g: -g.Level)
+        elif sort is None:
+            giter = gobjs
+        for i,og in enumerate(giter):
             if ngz > 0:
                 g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
             else:
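
With the sort keys above, callers can request level-ordered traversal
when chunking spatially.  A hedged sketch (dobj stands in for any yt data
object, and process() is a placeholder, not a yt function):

    # Walk the grids overlapping dobj from finest to coarsest level;
    # ngz = 0 requests no ghost zones.
    for chunk in pf.h._chunk(dobj, "spatial", ngz = 0, sort = "-level"):
        process(chunk)  # placeholder for per-chunk work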



https://bitbucket.org/yt_analysis/yt-3.0/changeset/eb4e39f012a7/
changeset:   eb4e39f012a7
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-05 20:31:31
summary:     Merging from tip of yt repository.
affected #:  4 files

diff -r d7d2590b8a66c5c6fba756902c3ee3952eb4edd9 -r eb4e39f012a7e67102e10c311f942bc25dd5f71c yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -936,8 +936,22 @@
     def get_data(self, field):
         self._get_list_of_grids()
         # We don't generate coordinates here.
-        fields_to_get = ensure_list(field)
-        fields_to_get = [f for f in fields_to_get if f not in self.field_data]
+        if field is None:
+            fields = self.fields[:]
+        else:
+            fields = ensure_list(field)
+        fields_to_get = []
+        for field in fields:
+            if self.field_data.has_key(field): continue
+            if field not in self.hierarchy.field_list:
+                try:
+                    #print "Generating", field
+                    self._generate_field(field)
+                    continue
+                except NeedsOriginalGrid, ngt_exception:
+                    pass
+            fields_to_get.append(field)
+        if len(fields_to_get) == 0: return
         # Note that, thanks to some trickery, we have different dimensions
         # on the field than one might think from looking at the dx and the
         # L/R edges.




diff -r d7d2590b8a66c5c6fba756902c3ee3952eb4edd9 -r eb4e39f012a7e67102e10c311f942bc25dd5f71c yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -26,3 +26,24 @@
                                       dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
                                       dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
                     yield assert_equal, f, g["Density"]
+
+def test_smoothed_covering_grid():
+    # We decompose in different ways
+    for level in [0, 1, 2]:
+        for nprocs in [1, 2, 4, 8]:
+            pf = fake_random_pf(16, nprocs = nprocs)
+            dn = pf.refine_by**level 
+            cg = pf.h.smoothed_covering_grid(level, [0.0, 0.0, 0.0],
+                    dn * pf.domain_dimensions)
+            assert_equal( cg["Ones"].max(), 1.0)
+            assert_equal( cg["Ones"].min(), 1.0)
+            assert_equal( cg["CellVolume"].sum(), pf.domain_width.prod())
+            for g in pf.h.grids:
+                if level != g.Level: continue
+                di = g.get_global_startindex()
+                dd = g.ActiveDimensions
+                for i in range(dn):
+                    f = cg["Density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
+                                      dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
+                                      dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
+                    yield assert_equal, f, g["Density"]





https://bitbucket.org/yt_analysis/yt-3.0/changeset/6ebce99c4799/
changeset:   6ebce99c4799
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-10 04:15:56
summary:     ActiveParticles are now once again accessible from Enzo datasets.
affected #:  1 file

diff -r eb4e39f012a7e67102e10c311f942bc25dd5f71c -r 6ebce99c4799f594312d7fee2e5d3768894cac47 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -375,6 +375,9 @@
         self.grid_left_edge[:] = f["/LeftEdges"][:]
         self.grid_right_edge[:] = f["/RightEdges"][:]
         self.grid_particle_count[:,0] = f["/NumberOfParticles"][:]
+        if "NumberOfActiveParticles" in f:
+            self.grid_active_particle_count[:,0] = \
+                f["/NumberOfActiveParticles"][:]
         levels = f["/Level"][:]
         parents = f["/ParentIDs"][:]
         procs = f["/Processor"][:]
@@ -428,6 +431,7 @@
 
         f.create_dataset("/ActiveDimensions", data=self.grid_dimensions)
         f.create_dataset("/NumberOfParticles", data=self.grid_particle_count[:,0])
+        f.create_dataset("/NumberOfActiveParticles", data=self.grid_active_particle_count[:,0])
 
         f.close()
 
@@ -506,7 +510,7 @@
                     field_list = field_list.union(gf)
             if "AppendActiveParticleType" in self.parameter_file.parameters:
                 ap_fields = self._detect_active_particle_fields()
-                field_list = field_list.union(ap_fields)
+                field_list = list(set(field_list).union(ap_fields))
         else:
             field_list = None
         field_list = self.comm.mpi_bcast(field_list)
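
The guarded read above keeps the cached-hierarchy loader backward
compatible, since caches written before this change lack the new dataset.
A minimal h5py sketch of the pattern (the filename is hypothetical):

    import h5py

    f = h5py.File("hierarchy_cache.h5", "r")  # hypothetical cache file
    n_particles = f["/NumberOfParticles"][:]
    # Older caches predate active particles, so probe before reading.
    if "NumberOfActiveParticles" in f:
        n_active = f["/NumberOfActiveParticles"][:]
    f.close()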



https://bitbucket.org/yt_analysis/yt-3.0/changeset/97597f12e163/
changeset:   97597f12e163
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-10 04:18:23
summary:     Merging
affected #:  1 file

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


